include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch / sparc / kernel / traps_64.c
CommitLineData
d979f179 1/* arch/sparc64/kernel/traps.c
1da177e4 2 *
fcd26f7a 3 * Copyright (C) 1995,1997,2008,2009 David S. Miller (davem@davemloft.net)
1da177e4
LT
4 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
5 */
6
7/*
8 * I like traps on v9, :))))
9 */
10
1da177e4 11#include <linux/module.h>
a2c1e064 12#include <linux/sched.h>
9843099f 13#include <linux/linkage.h>
1da177e4 14#include <linux/kernel.h>
1da177e4
LT
15#include <linux/signal.h>
16#include <linux/smp.h>
1da177e4
LT
17#include <linux/mm.h>
18#include <linux/init.h>
1eeb66a1 19#include <linux/kdebug.h>
5a0e3ad6 20#include <linux/gfp.h>
1da177e4 21
2f4dfe20 22#include <asm/smp.h>
1da177e4
LT
23#include <asm/delay.h>
24#include <asm/system.h>
25#include <asm/ptrace.h>
26#include <asm/oplib.h>
27#include <asm/page.h>
28#include <asm/pgtable.h>
29#include <asm/unistd.h>
30#include <asm/uaccess.h>
31#include <asm/fpumacro.h>
32#include <asm/lsu.h>
33#include <asm/dcu.h>
34#include <asm/estate.h>
35#include <asm/chafsr.h>
6c52a96e 36#include <asm/sfafsr.h>
1da177e4
LT
37#include <asm/psrcompat.h>
38#include <asm/processor.h>
39#include <asm/timer.h>
92704a1c 40#include <asm/head.h>
07f8e5f3 41#include <asm/prom.h>
881d021a 42#include <asm/memctrl.h>
1da177e4 43
99cd2201 44#include "entry.h"
4f70f7a9 45#include "kstack.h"
1da177e4
LT
46
/* When an irrecoverable trap occurs at tl > 0, the trap entry
 * code logs the trap state registers at every level in the trap
 * stack.  It is found at (pt_regs + sizeof(pt_regs)) and the layout
 * is as follows:
 */
struct tl1_traplog {
	struct {
		unsigned long tstate;	/* %tstate at this trap level */
		unsigned long tpc;	/* trap PC */
		unsigned long tnpc;	/* trap next-PC */
		unsigned long tt;	/* trap type register */
	} trapstack[4];			/* one entry per trap level, 1..4 */
	unsigned long tl;		/* trap level at which the error hit */
};
61
62static void dump_tl1_traplog(struct tl1_traplog *p)
63{
3d6395cb 64 int i, limit;
1da177e4 65
04d74758
DM
66 printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, "
67 "dumping track stack.\n", p->tl);
3d6395cb
DM
68
69 limit = (tlb_type == hypervisor) ? 2 : 4;
39334a4b 70 for (i = 0; i < limit; i++) {
04d74758 71 printk(KERN_EMERG
1da177e4
LT
72 "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
73 "TNPC[%016lx] TT[%lx]\n",
74 i + 1,
75 p->trapstack[i].tstate, p->trapstack[i].tpc,
76 p->trapstack[i].tnpc, p->trapstack[i].tt);
4fe3ebec 77 printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
1da177e4
LT
78 }
79}
80
1da177e4
LT
/* Fallback handler for traps with no dedicated handler.
 *
 * @regs: faulting register state
 * @lvl:  trap vector number; values below 0x100 are hardware traps
 *        (always fatal), values >= 0x100 are software traps.
 *
 * Kernel-mode traps die; user-mode software traps deliver
 * SIGILL/ILL_ILLTRP with the trap number in si_trapno.
 */
void bad_trap(struct pt_regs *regs, long lvl)
{
	char buffer[32];
	siginfo_t info;

	/* Let registered die-chain listeners (debuggers) claim it first. */
	if (notify_die(DIE_TRAP, "bad trap", regs,
		       0, lvl, SIGTRAP) == NOTIFY_STOP)
		return;

	/* Unexpected hardware trap: fatal regardless of privilege. */
	if (lvl < 0x100) {
		sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
		die_if_kernel(buffer, regs);
	}

	/* Normalize to the software trap number. */
	lvl -= 0x100;
	if (regs->tstate & TSTATE_PRIV) {
		sprintf(buffer, "Kernel bad sw trap %lx", lvl);
		die_if_kernel(buffer, regs);
	}
	/* 32-bit tasks only use the low 32 bits of PC/NPC. */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLTRP;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = lvl;
	force_sig_info(SIGILL, &info, current);
}
111
/* Like bad_trap(), but entered from trap level > 0: dump the saved
 * trap-level stack first, then die (a tl>0 bad trap is always fatal).
 */
void bad_trap_tl1(struct pt_regs *regs, long lvl)
{
	char buffer[32];

	if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
		       0, lvl, SIGTRAP) == NOTIFY_STOP)
		return;

	/* The traplog lives directly after pt_regs on the stack. */
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	sprintf (buffer, "Bad trap %lx at tl>0", lvl);
	die_if_kernel (buffer, regs);
}
125
#ifdef CONFIG_DEBUG_BUGVERBOSE
/* Print the file/line of a BUG() site; the actual trap and die
 * handling happen in the caller's trap path.
 */
void do_BUG(const char *file, int line)
{
	bust_spinlocks(1);	/* make sure the message reaches the console */
	printk("kernel BUG at %s:%d!\n", file, line);
}
EXPORT_SYMBOL(do_BUG);
#endif
134
881d021a
DM
/* Serializes registration and use of the DIMM name printer hook. */
static DEFINE_SPINLOCK(dimm_handler_lock);
/* Optional platform-provided DIMM name printer; NULL when unregistered. */
static dimm_printer_t dimm_handler;
137
138static int sprintf_dimm(int synd_code, unsigned long paddr, char *buf, int buflen)
139{
140 unsigned long flags;
141 int ret = -ENODEV;
142
143 spin_lock_irqsave(&dimm_handler_lock, flags);
144 if (dimm_handler) {
145 ret = dimm_handler(synd_code, paddr, buf, buflen);
146 } else if (tlb_type == spitfire) {
147 if (prom_getunumber(synd_code, paddr, buf, buflen) == -1)
148 ret = -EINVAL;
149 else
150 ret = 0;
151 } else
152 ret = -ENODEV;
153 spin_unlock_irqrestore(&dimm_handler_lock, flags);
154
155 return ret;
156}
157
158int register_dimm_printer(dimm_printer_t func)
159{
160 unsigned long flags;
161 int ret = 0;
162
163 spin_lock_irqsave(&dimm_handler_lock, flags);
164 if (!dimm_handler)
165 dimm_handler = func;
166 else
167 ret = -EEXIST;
168 spin_unlock_irqrestore(&dimm_handler_lock, flags);
169
170 return ret;
171}
41660e9a 172EXPORT_SYMBOL_GPL(register_dimm_printer);
881d021a
DM
173
174void unregister_dimm_printer(dimm_printer_t func)
175{
176 unsigned long flags;
177
178 spin_lock_irqsave(&dimm_handler_lock, flags);
179 if (dimm_handler == func)
180 dimm_handler = NULL;
181 spin_unlock_irqrestore(&dimm_handler_lock, flags);
182}
41660e9a 183EXPORT_SYMBOL_GPL(unregister_dimm_printer);
881d021a 184
6c52a96e 185void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
1da177e4
LT
186{
187 siginfo_t info;
188
189 if (notify_die(DIE_TRAP, "instruction access exception", regs,
190 0, 0x8, SIGTRAP) == NOTIFY_STOP)
191 return;
192
193 if (regs->tstate & TSTATE_PRIV) {
6c52a96e
DM
194 printk("spitfire_insn_access_exception: SFSR[%016lx] "
195 "SFAR[%016lx], going.\n", sfsr, sfar);
1da177e4
LT
196 die_if_kernel("Iax", regs);
197 }
198 if (test_thread_flag(TIF_32BIT)) {
199 regs->tpc &= 0xffffffff;
200 regs->tnpc &= 0xffffffff;
201 }
202 info.si_signo = SIGSEGV;
203 info.si_errno = 0;
204 info.si_code = SEGV_MAPERR;
205 info.si_addr = (void __user *)regs->tpc;
206 info.si_trapno = 0;
207 force_sig_info(SIGSEGV, &info, current);
208}
209
6c52a96e 210void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
1da177e4
LT
211{
212 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
213 0, 0x8, SIGTRAP) == NOTIFY_STOP)
214 return;
215
216 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
6c52a96e 217 spitfire_insn_access_exception(regs, sfsr, sfar);
1da177e4
LT
218}
219
ed6b0b45
DM
/* sun4v (hypervisor) instruction access exception.
 *
 * @addr:     faulting virtual address
 * @type_ctx: packed value -- fault type in bits 31:16, MMU context
 *            in bits 15:0.
 *
 * Fatal in kernel mode; user tasks get SIGSEGV/SEGV_MAPERR with the
 * faulting address as si_addr.
 */
void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	unsigned short type = (type_ctx >> 16);
	unsigned short ctx  = (type_ctx & 0xffff);
	siginfo_t info;

	if (notify_die(DIE_TRAP, "instruction access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		printk("sun4v_insn_access_exception: ADDR[%016lx] "
		       "CTX[%04x] TYPE[%04x], going.\n",
		       addr, ctx, type);
		die_if_kernel("Iax", regs);
	}

	/* 32-bit tasks only use the low 32 bits of PC/NPC. */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
248
/* tl>0 variant: dump the trap-level stack, then handle as the tl0 case. */
void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	sun4v_insn_access_exception(regs, addr, type_ctx);
}
258
6c52a96e 259void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
1da177e4
LT
260{
261 siginfo_t info;
262
263 if (notify_die(DIE_TRAP, "data access exception", regs,
264 0, 0x30, SIGTRAP) == NOTIFY_STOP)
265 return;
266
267 if (regs->tstate & TSTATE_PRIV) {
268 /* Test if this comes from uaccess places. */
8cf14af0 269 const struct exception_table_entry *entry;
1da177e4 270
8cf14af0
DM
271 entry = search_exception_tables(regs->tpc);
272 if (entry) {
273 /* Ouch, somebody is trying VM hole tricks on us... */
1da177e4
LT
274#ifdef DEBUG_EXCEPTIONS
275 printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
8cf14af0
DM
276 printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
277 regs->tpc, entry->fixup);
1da177e4 278#endif
8cf14af0 279 regs->tpc = entry->fixup;
1da177e4 280 regs->tnpc = regs->tpc + 4;
1da177e4
LT
281 return;
282 }
283 /* Shit... */
6c52a96e
DM
284 printk("spitfire_data_access_exception: SFSR[%016lx] "
285 "SFAR[%016lx], going.\n", sfsr, sfar);
1da177e4
LT
286 die_if_kernel("Dax", regs);
287 }
288
289 info.si_signo = SIGSEGV;
290 info.si_errno = 0;
291 info.si_code = SEGV_MAPERR;
292 info.si_addr = (void __user *)sfar;
293 info.si_trapno = 0;
294 force_sig_info(SIGSEGV, &info, current);
295}
296
6c52a96e 297void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
bde4e4ee
DM
298{
299 if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
300 0, 0x30, SIGTRAP) == NOTIFY_STOP)
301 return;
302
303 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
6c52a96e 304 spitfire_data_access_exception(regs, sfsr, sfar);
bde4e4ee
DM
305}
306
ed6b0b45
DM
/* sun4v (hypervisor) data access exception.
 *
 * @addr:     faulting virtual address
 * @type_ctx: packed value -- fault type in bits 31:16, MMU context
 *            in bits 15:0.
 *
 * Kernel-mode faults try the exception-table fixup path first (so
 * uaccess can recover); otherwise the kernel dies.  User tasks get
 * SIGSEGV/SEGV_MAPERR with the fault address as si_addr.
 */
void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	unsigned short type = (type_ctx >> 16);
	unsigned short ctx  = (type_ctx & 0xffff);
	siginfo_t info;

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* Test if this comes from uaccess places. */
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			/* Ouch, somebody is trying VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
			printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
			       regs->tpc, entry->fixup);
#endif
			/* Redirect execution to the fixup stub. */
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
		printk("sun4v_data_access_exception: ADDR[%016lx] "
		       "CTX[%04x] TYPE[%04x], going.\n",
		       addr, ctx, type);
		die_if_kernel("Dax", regs);
	}

	/* 32-bit tasks only use the low 32 bits of PC/NPC. */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
350
/* tl>0 variant: dump the trap-level stack, then handle as the tl0 case. */
void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	sun4v_data_access_exception(regs, addr, type_ctx);
}
360
1da177e4 361#ifdef CONFIG_PCI
77d10d0e 362#include "pci_impl.h"
1da177e4
LT
363#endif
364
/* When access exceptions happen, we must do this. */
static void spitfire_clean_and_reenable_l1_caches(void)
{
	unsigned long va;

	/* This routine is spitfire-specific. */
	if (tlb_type != spitfire)
		BUG();

	/* Clean 'em: zero the I-cache and D-cache tags, 32 bytes apart
	 * over a (PAGE_SIZE << 1) diagnostic address range.
	 */
	for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
		spitfire_put_icache_tag(va, 0x0);
		spitfire_put_dcache_tag(va, 0x0);
	}

	/* Re-enable the I/D caches in the LSU control register. */
	__asm__ __volatile__("flush %%g6\n\t"
			     "membar #Sync\n\t"
			     "stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
				    LSU_CONTROL_IM | LSU_CONTROL_DM),
			       "i" (ASI_LSU_CONTROL)
			     : "memory");
}
390
6c52a96e 391static void spitfire_enable_estate_errors(void)
1da177e4 392{
6c52a96e
DM
393 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
394 "membar #Sync"
395 : /* no outputs */
396 : "r" (ESTATE_ERR_ALL),
397 "i" (ASI_ESTATE_ERROR_EN));
1da177e4
LT
398}
399
/* Maps the low 8 bits of a UDB error register (the ECC syndrome) to a
 * syndrome code consumed by sprintf_dimm()/prom_getunumber().
 *
 * Fix: declared const -- this is a read-only lookup table and belongs
 * in .rodata; writing to it would be a bug.
 */
static const char ecc_syndrome_table[] = {
	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};
434
1da177e4
LT
435static char *syndrome_unknown = "<Unknown>";
436
6c52a96e 437static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
1da177e4 438{
6c52a96e
DM
439 unsigned short scode;
440 char memmod_str[64], *p;
1da177e4 441
6c52a96e
DM
442 if (udbl & bit) {
443 scode = ecc_syndrome_table[udbl & 0xff];
881d021a 444 if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
1da177e4
LT
445 p = syndrome_unknown;
446 else
447 p = memmod_str;
448 printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
449 "Memory Module \"%s\"\n",
450 smp_processor_id(), scode, p);
451 }
452
6c52a96e
DM
453 if (udbh & bit) {
454 scode = ecc_syndrome_table[udbh & 0xff];
881d021a 455 if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
1da177e4
LT
456 p = syndrome_unknown;
457 else
458 p = memmod_str;
459 printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
460 "Memory Module \"%s\"\n",
461 smp_processor_id(), scode, p);
462 }
6c52a96e
DM
463
464}
465
/* Log a correctable ECC error (CEE).  Correctable errors are not
 * fatal: report them, notify listeners, and re-arm error reporting.
 */
static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
{
	printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
	       "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
	       smp_processor_id(), afsr, afar, udbl, udbh, tl1);

	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);

	/* We always log it, even if someone is listening for this
	 * trap.
	 */
	notify_die(DIE_TRAP, "Correctable ECC Error", regs,
		   0, TRAP_TYPE_CEE, SIGTRAP);

	/* The Correctable ECC Error trap does not disable I/D caches.  So
	 * we only have to restore the ESTATE Error Enable register.
	 */
	spitfire_enable_estate_errors();
}
486
487static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
488{
489 siginfo_t info;
490
491 printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
492 "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n",
493 smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
494
495 /* XXX add more human friendly logging of the error status
496 * XXX as is implemented for cheetah
497 */
498
499 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
500
501 /* We always log it, even if someone is listening for this
502 * trap.
503 */
504 notify_die(DIE_TRAP, "Uncorrectable Error", regs,
505 0, tt, SIGTRAP);
506
507 if (regs->tstate & TSTATE_PRIV) {
508 if (tl1)
509 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
510 die_if_kernel("UE", regs);
511 }
512
513 /* XXX need more intelligent processing here, such as is implemented
514 * XXX for cheetah errors, in fact if the E-cache still holds the
515 * XXX line with bad parity this will loop
516 */
517
518 spitfire_clean_and_reenable_l1_caches();
519 spitfire_enable_estate_errors();
520
521 if (test_thread_flag(TIF_32BIT)) {
522 regs->tpc &= 0xffffffff;
523 regs->tnpc &= 0xffffffff;
524 }
525 info.si_signo = SIGBUS;
526 info.si_errno = 0;
527 info.si_code = BUS_OBJERR;
528 info.si_addr = (void *)0;
529 info.si_trapno = 0;
530 force_sig_info(SIGBUS, &info, current);
531}
532
/* Top-level spitfire async access error handler.
 *
 * @status_encoded: AFSR, trap type, TL>1 flag and both UDB error
 *                  registers packed by the low-level trap code
 *                  (SFSTAT_* masks/shifts).
 * @afar:           fault address register contents
 *
 * Dispatches to the UE and/or CEE loggers; also recognizes faults
 * provoked by a PCI config-space "poke" probe and converts them into
 * a skipped instruction instead of an error report.
 */
void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
{
	unsigned long afsr, tt, udbh, udbl;
	int tl1;

	/* Unpack the fields the trap entry code packed for us. */
	afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
	tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
	tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
	udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
	udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;

#ifdef CONFIG_PCI
	/* An expected fault from a PCI poke probe on this cpu: flag it,
	 * restore cache/error state, and skip the faulting instruction.
	 */
	if (tt == TRAP_TYPE_DAE &&
	    pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		spitfire_clean_and_reenable_l1_caches();
		spitfire_enable_estate_errors();

		pci_poke_faulted = 1;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	if (afsr & SFAFSR_UE)
		spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);

	if (tt == TRAP_TYPE_CEE) {
		/* Handle the case where we took a CEE trap, but ACK'd
		 * only the UE state in the UDB error registers.
		 */
		if (afsr & SFAFSR_UE) {
			if (udbh & UDBE_CE) {
				__asm__ __volatile__(
					"stxa	%0, [%1] %2\n\t"
					"membar	#Sync"
					: /* no outputs */
					: "r" (udbh & UDBE_CE),
					  "r" (0x0), "i" (ASI_UDB_ERROR_W));
			}
			if (udbl & UDBE_CE) {
				__asm__ __volatile__(
					"stxa	%0, [%1] %2\n\t"
					"membar	#Sync"
					: /* no outputs */
					: "r" (udbl & UDBE_CE),
					  "r" (0x18), "i" (ASI_UDB_ERROR_W));
			}
		}

		spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
	}
}
585
816242da
DM
/* Non-zero when the P-cache has been forced on (e.g. via boot option). */
int cheetah_pcache_forced_on;

/* Turn on the cheetah P-cache: read-modify-write the DCU control
 * register, setting the P-cache enable bits.
 */
void cheetah_enable_pcache(void)
{
	unsigned long dcr;

	printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
	       smp_processor_id());

	__asm__ __volatile__("ldxa [%%g0] %1, %0"
			     : "=r" (dcr)
			     : "i" (ASI_DCU_CONTROL_REG));
	dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
}
604
1da177e4
LT
605/* Cheetah error trap handling. */
606static unsigned long ecache_flush_physbase;
607static unsigned long ecache_flush_linesize;
608static unsigned long ecache_flush_size;
609
1da177e4
LT
/* This table is ordered in priority of errors and matches the
 * AFAR overwrite policy as well.  Do not reorder entries.
 */

/* One row: an AFSR status bit and its human readable description. */
struct afsr_error_table {
	unsigned long mask;
	const char *name;
};

static const char CHAFSR_PERR_msg[] =
	"System interface protocol error";
static const char CHAFSR_IERR_msg[] =
	"Internal processor error";
static const char CHAFSR_ISAP_msg[] =
	"System request parity error on incoming addresss";
static const char CHAFSR_UCU_msg[] =
	"Uncorrectable E-cache ECC error for ifetch/data";
static const char CHAFSR_UCC_msg[] =
	"SW Correctable E-cache ECC error for ifetch/data";
static const char CHAFSR_UE_msg[] =
	"Uncorrectable system bus data ECC error for read";
static const char CHAFSR_EDU_msg[] =
	"Uncorrectable E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMU_msg[] =
	"Uncorrectable system bus MTAG error";
static const char CHAFSR_WDU_msg[] =
	"Uncorrectable E-cache ECC error for writeback";
static const char CHAFSR_CPU_msg[] =
	"Uncorrectable ECC error for copyout";
static const char CHAFSR_CE_msg[] =
	"HW corrected system bus data ECC error for read";
static const char CHAFSR_EDC_msg[] =
	"HW corrected E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMC_msg[] =
	"HW corrected system bus MTAG ECC error";
static const char CHAFSR_WDC_msg[] =
	"HW corrected E-cache ECC error for writeback";
static const char CHAFSR_CPC_msg[] =
	"HW corrected ECC error for copyout";
static const char CHAFSR_TO_msg[] =
	"Unmapped error from system bus";
static const char CHAFSR_BERR_msg[] =
	"Bus error response from system bus";
static const char CHAFSR_IVC_msg[] =
	"HW corrected system bus data ECC error for ivec read";
static const char CHAFSR_IVU_msg[] =
	"Uncorrectable system bus data ECC error for ivec read";

/* Base cheetah AFSR decode table. */
static struct afsr_error_table __cheetah_error_table[] = {
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
static const char CHPAFSR_DTO_msg[] =
	"System bus unmapped error for prefetch/storequeue-read";
static const char CHPAFSR_DBERR_msg[] =
	"System bus error for prefetch/storequeue-read";
static const char CHPAFSR_THCE_msg[] =
	"Hardware corrected E-cache Tag ECC error";
static const char CHPAFSR_TSCE_msg[] =
	"SW handled correctable E-cache Tag ECC error";
static const char CHPAFSR_TUE_msg[] =
	"Uncorrectable E-cache Tag ECC error";
static const char CHPAFSR_DUE_msg[] =
	"System bus uncorrectable data ECC error due to prefetch/store-fill";

/* Cheetah+ extends the base table with extra status bits. */
static struct afsr_error_table __cheetah_plus_error_table[] = {
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	{	CHPAFSR_DTO,	CHPAFSR_DTO_msg		},
	{	CHPAFSR_DBERR,	CHPAFSR_DBERR_msg	},
	{	CHPAFSR_THCE,	CHPAFSR_THCE_msg	},
	{	CHPAFSR_TSCE,	CHPAFSR_TSCE_msg	},
	{	CHPAFSR_TUE,	CHPAFSR_TUE_msg		},
	{	CHPAFSR_DUE,	CHPAFSR_DUE_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
static const char JPAFSR_JETO_msg[] =
	"System interface protocol error, hw timeout caused";
static const char JPAFSR_SCE_msg[] =
	"Parity error on system snoop results";
static const char JPAFSR_JEIC_msg[] =
	"System interface protocol error, illegal command detected";
static const char JPAFSR_JEIT_msg[] =
	"System interface protocol error, illegal ADTYPE detected";
static const char JPAFSR_OM_msg[] =
	"Out of range memory error has occurred";
static const char JPAFSR_ETP_msg[] =
	"Parity error on L2 cache tag SRAM";
static const char JPAFSR_UMS_msg[] =
	"Error due to unsupported store";
static const char JPAFSR_RUE_msg[] =
	"Uncorrectable ECC error from remote cache/memory";
static const char JPAFSR_RCE_msg[] =
	"Correctable ECC error from remote cache/memory";
static const char JPAFSR_BP_msg[] =
	"JBUS parity error on returned read data";
static const char JPAFSR_WBP_msg[] =
	"JBUS parity error on data for writeback or block store";
static const char JPAFSR_FRC_msg[] =
	"Foreign read to DRAM incurring correctable ECC error";
static const char JPAFSR_FRU_msg[] =
	"Foreign read to DRAM incurring uncorrectable ECC error";

/* Jalapeno/Serrano variant of the decode table. */
static struct afsr_error_table __jalapeno_error_table[] = {
	{	JPAFSR_JETO,	JPAFSR_JETO_msg		},
	{	JPAFSR_SCE,	JPAFSR_SCE_msg		},
	{	JPAFSR_JEIC,	JPAFSR_JEIC_msg		},
	{	JPAFSR_JEIT,	JPAFSR_JEIT_msg		},
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	JPAFSR_OM,	JPAFSR_OM_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	JPAFSR_ETP,	JPAFSR_ETP_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	{	JPAFSR_UMS,	JPAFSR_UMS_msg		},
	{	JPAFSR_RUE,	JPAFSR_RUE_msg		},
	{	JPAFSR_RCE,	JPAFSR_RCE_msg		},
	{	JPAFSR_BP,	JPAFSR_BP_msg		},
	{	JPAFSR_WBP,	JPAFSR_WBP_msg		},
	{	JPAFSR_FRC,	JPAFSR_FRC_msg		},
	{	JPAFSR_FRU,	JPAFSR_FRU_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};

/* Selected at boot in cheetah_ecache_flush_init() based on cpu type. */
static struct afsr_error_table *cheetah_error_table;
static unsigned long cheetah_afsr_errors;
782
1da177e4
LT
783struct cheetah_err_info *cheetah_error_log;
784
d979f179 785static inline struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
1da177e4
LT
786{
787 struct cheetah_err_info *p;
788 int cpu = smp_processor_id();
789
790 if (!cheetah_error_log)
791 return NULL;
792
793 p = cheetah_error_log + (cpu * 2);
794 if ((afsr & CHAFSR_TL1) != 0UL)
795 p++;
796
797 return p;
798}
799
800extern unsigned int tl0_icpe[], tl1_icpe[];
801extern unsigned int tl0_dcpe[], tl1_dcpe[];
802extern unsigned int tl0_fecc[], tl1_fecc[];
803extern unsigned int tl0_cee[], tl1_cee[];
804extern unsigned int tl0_iae[], tl1_iae[];
805extern unsigned int tl0_dae[], tl1_dae[];
806extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
807extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
808extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
809extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
810extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
811
/* Boot-time setup for cheetah error handling:
 *  - probe per-cpu E-cache geometry to size the flush region,
 *  - allocate the per-cpu error-log scoreboard,
 *  - select the AFSR decode table for this cpu variant,
 *  - patch the error trap vectors into the trap table.
 * Any probe/allocation failure halts at the PROM.
 */
void __init cheetah_ecache_flush_init(void)
{
	unsigned long largest_size, smallest_linesize, order, ver;
	int i, sz;

	/* Scan all cpu device tree nodes, note two values:
	 * 1) largest E-cache size
	 * 2) smallest E-cache line size
	 */
	largest_size = 0UL;
	smallest_linesize = ~0UL;

	for (i = 0; i < NR_CPUS; i++) {
		unsigned long val;

		val = cpu_data(i).ecache_size;
		if (!val)
			continue;	/* not a present cpu */

		if (val > largest_size)
			largest_size = val;

		val = cpu_data(i).ecache_line_size;
		if (val < smallest_linesize)
			smallest_linesize = val;

	}

	if (largest_size == 0UL || smallest_linesize == ~0UL) {
		prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
			    "parameters.\n");
		prom_halt();
	}

	/* Twice the largest E-cache so a full sweep displaces every line. */
	ecache_flush_size = (2 * largest_size);
	ecache_flush_linesize = smallest_linesize;

	ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);

	if (ecache_flush_physbase == ~0UL) {
		prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
			    "contiguous physical memory.\n",
			    ecache_flush_size);
		prom_halt();
	}

	/* Now allocate error trap reporting scoreboard:
	 * two cheetah_err_info slots per possible cpu.
	 */
	sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
	for (order = 0; order < MAX_ORDER; order++) {
		if ((PAGE_SIZE << order) >= sz)
			break;
	}
	cheetah_error_log = (struct cheetah_err_info *)
		__get_free_pages(GFP_KERNEL, order);
	if (!cheetah_error_log) {
		prom_printf("cheetah_ecache_flush_init: Failed to allocate "
			    "error logging scoreboard (%d bytes).\n", sz);
		prom_halt();
	}
	memset(cheetah_error_log, 0, PAGE_SIZE << order);

	/* Mark all AFSRs as invalid so that the trap handler will
	 * log new information there.
	 */
	for (i = 0; i < 2 * NR_CPUS; i++)
		cheetah_error_log[i].afsr = CHAFSR_INVALID;

	/* Pick the decode table from the cpu implementation id. */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	if ((ver >> 32) == __JALAPENO_ID ||
	    (ver >> 32) == __SERRANO_ID) {
		cheetah_error_table = &__jalapeno_error_table[0];
		cheetah_afsr_errors = JPAFSR_ERRORS;
	} else if ((ver >> 32) == 0x003e0015) {
		cheetah_error_table = &__cheetah_plus_error_table[0];
		cheetah_afsr_errors = CHPAFSR_ERRORS;
	} else {
		cheetah_error_table = &__cheetah_error_table[0];
		cheetah_afsr_errors = CHAFSR_ERRORS;
	}

	/* Now patch trap tables: each vector is 8 instructions (8 * 4 bytes). */
	memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
	memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
	memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
	memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
	memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
	memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
	memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
	memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
	if (tlb_type == cheetah_plus) {
		memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
		memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
		memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
		memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
	}
	flushi(PAGE_OFFSET);
}
909
/* Displacement-flush the entire E-cache by reading the dedicated
 * physical flush region (set up in cheetah_ecache_flush_init) one
 * line at a time via ASI_PHYS_USE_EC.
 */
static void cheetah_flush_ecache(void)
{
	unsigned long flush_base = ecache_flush_physbase;
	unsigned long flush_linesize = ecache_flush_linesize;
	unsigned long flush_size = ecache_flush_size;

	/* Loop: flush_size -= linesize; load from base+offset until zero. */
	__asm__ __volatile__("1: subcc	%0, %4, %0\n\t"
			     "   bne,pt	%%xcc, 1b\n\t"
			     "    ldxa	[%2 + %0] %3, %%g0\n\t"
			     : "=&r" (flush_size)
			     : "0" (flush_size), "r" (flush_base),
			       "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
}
923
/* Displacement-flush a single E-cache line: read the two addresses in
 * the flush region that alias the given physical address (direct map
 * halves of the doubled flush region).
 */
static void cheetah_flush_ecache_line(unsigned long physaddr)
{
	unsigned long alias;

	physaddr &= ~(8UL - 1UL);	/* 8-byte align */
	physaddr = (ecache_flush_physbase +
		    (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
	alias = physaddr + (ecache_flush_size >> 1UL);
	__asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
			     "ldxa [%1] %2, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (physaddr), "r" (alias),
			       "i" (ASI_PHYS_USE_EC));
}
939
/* Unfortunately, the diagnostic access to the I-cache tags we need to
 * use to clear the thing interferes with I-cache coherency transactions.
 *
 * So we must only flush the I-cache when it is disabled.
 */
static void __cheetah_flush_icache(void)
{
	unsigned int icache_size, icache_line_size;
	unsigned long addr;

	icache_size = local_cpu_data().icache_size;
	icache_line_size = local_cpu_data().icache_line_size;

	/* Clear the valid bits in all the tags. */
	for (addr = 0; addr < icache_size; addr += icache_line_size) {
		/* (2 << 3) selects the tag diagnostic access within
		 * ASI_IC_TAG -- see the UltraSPARC-III manuals.
		 */
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr | (2 << 3)),
				       "i" (ASI_IC_TAG));
	}
}
962
/* Flush the I-cache with the DCU control register manipulated around
 * the tag walk, then restore the original DCU value.
 */
static void cheetah_flush_icache(void)
{
	unsigned long dcu_save;

	/* Save current DCU, disable I-cache.
	 * NOTE(review): the "or" sets the DCU_IC bit rather than
	 * clearing it, which reads as *enabling* the I-cache despite
	 * this comment -- in the error paths the trap vector has
	 * already turned the caches off before we get here; verify
	 * the intended DCU_IC manipulation against the UltraSPARC-III
	 * manual before changing anything.
	 */
	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     "or %0, %2, %%g1\n\t"
			     "stxa %%g1, [%%g0] %1\n\t"
			     "membar #Sync"
			     : "=r" (dcu_save)
			     : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
			     : "g1");

	__cheetah_flush_icache();

	/* Restore DCU register */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
}
984
/* Invalidate the entire local D-cache by storing zero to every tag
 * through the ASI_DCACHE_TAG diagnostic ASI.
 */
static void cheetah_flush_dcache(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	/* One diagnostic tag store per line. */
	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr), "i" (ASI_DCACHE_TAG));
	}
}
1000
1001/* In order to make the even parity correct we must do two things.
1002 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
1003 * Next, we clear out all 32-bytes of data for that line. Data of
1004 * all-zero + tag parity value of zero == correct parity.
1005 */
static void cheetah_plus_zap_dcache_parity(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		/* utag value derived from the line's index;
		 * NOTE(review): the >> 14 presumably matches the
		 * D-cache way size for the utag encoding -- confirm
		 * against the Cheetah+ manual.
		 */
		unsigned long tag = (addr >> 14);
		unsigned long line;

		/* Step 1 (see comment above the function): write the
		 * utag, which also clears DC_data_parity.
		 */
		__asm__ __volatile__("membar #Sync\n\t"
				     "stxa %0, [%1] %2\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (tag), "r" (addr),
				       "i" (ASI_DCACHE_UTAG));
		/* Step 2: zero the whole line's data, 8 bytes at a
		 * time; all-zero data plus zero tag parity is even.
		 */
		for (line = addr; line < addr + dcache_line_size; line += 8)
			__asm__ __volatile__("membar #Sync\n\t"
					     "stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (line),
					       "i" (ASI_DCACHE_DATA));
	}
}
1033
1034/* Conversion tables used to frob Cheetah AFSR syndrome values into
1035 * something palatable to the memory controller driver get_unumber
1036 * routine.
1037 */
/* Values >= 128 are special tokens for get_unumber rather than data
 * bit numbers -- NOTE(review): Cx presumably denote check bits, Mx
 * multi-bit/ambiguous decodes, MTx/MTCx mtag-related codes, NONE "no
 * decode"; confirm against the memory controller driver.
 */
#define MT0	137
#define MT1	138
#define MT2	139
#define NONE	254
#define MTC0	140
#define MTC1	141
#define MTC2	142
#define MTC3	143
#define C0	128
#define C1	129
#define C2	130
#define C3	131
#define C4	132
#define C5	133
#define C6	134
#define C7	135
#define C8	136
#define M2	144
#define M3	145
#define M4	146
#define M	147
/* Indexed by the 9-bit E-cache ECC syndrome from the AFSR (512
 * entries); yields the failing data bit number (0-127) or one of the
 * special tokens above.
 */
static unsigned char cheetah_ecc_syntab[] = {
/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
};
/* Indexed by the 4-bit mtag syndrome; yields an MTx/MTCx token. */
static unsigned char cheetah_mtag_syntab[] = {
	NONE, MTC0,
	MTC1, NONE,
	MTC2, NONE,
	NONE, MT0,
	MTC3, NONE,
	NONE, MT1,
	NONE, MT2,
	NONE, NONE
};
1103
1104/* Return the highest priority error conditon mentioned. */
d979f179 1105static inline unsigned long cheetah_get_hipri(unsigned long afsr)
1da177e4
LT
1106{
1107 unsigned long tmp = 0;
1108 int i;
1109
1110 for (i = 0; cheetah_error_table[i].mask; i++) {
1111 if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
1112 return tmp;
1113 }
1114 return tmp;
1115}
1116
1117static const char *cheetah_get_string(unsigned long bit)
1118{
1119 int i;
1120
1121 for (i = 0; cheetah_error_table[i].mask; i++) {
1122 if ((bit & cheetah_error_table[i].mask) != 0UL)
1123 return cheetah_error_table[i].name;
1124 }
1125 return "???";
1126}
1127
1da177e4
LT
/* Dump a full report for a Cheetah error trap: raw AFSR/AFAR, trap PC
 * state, decoded E-/M-syndromes (including the DIMM name via
 * sprintf_dimm() when applicable), the D/I/E-cache snapshots captured
 * in INFO by the trap vector, and every additional error bit beyond
 * the highest-priority one.  RECOVERABLE selects printk severity
 * (KERN_WARNING vs KERN_CRIT).
 */
static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
			       unsigned long afsr, unsigned long afar, int recoverable)
{
	unsigned long hipri;
	char unum[256];

	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       afsr, afar,
	       (afsr & CHAFSR_TL1) ? 1 : 0);
	printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
	printk("%s" "ERROR(%d): ",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
	printk("TPC<%pS>\n", (void *) regs->tpc);
	printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
	hipri = cheetah_get_hipri(afsr);
	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       hipri, cheetah_get_string(hipri));

	/* Try to get unumber if relevant. */
#define ESYND_ERRORS	(CHAFSR_IVC | CHAFSR_IVU | \
			 CHAFSR_CPC | CHAFSR_CPU | \
			 CHAFSR_UE | CHAFSR_CE | \
			 CHAFSR_EDC | CHAFSR_EDU | \
			 CHAFSR_UCC | CHAFSR_UCU | \
			 CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS	(CHAFSR_EMC | CHAFSR_EMU)
	if (afsr & ESYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
		syndrome = cheetah_ecc_syntab[syndrome];
		ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	} else if (afsr & MSYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
		syndrome = cheetah_mtag_syntab[syndrome];
		ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	}

	/* Now dump the cache snapshots. */
	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->dcache_index,
	       info->dcache_tag,
	       info->dcache_utag,
	       info->dcache_stag);
	printk("%s" "ERROR(%d): D-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->dcache_data[0],
	       info->dcache_data[1],
	       info->dcache_data[2],
	       info->dcache_data[3]);
	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx] "
	       "u[%016llx] l[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->icache_index,
	       info->icache_tag,
	       info->icache_utag,
	       info->icache_stag,
	       info->icache_upper,
	       info->icache_lower);
	printk("%s" "ERROR(%d): I-cache INSN0[%016llx] INSN1[%016llx] INSN2[%016llx] INSN3[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[0],
	       info->icache_data[1],
	       info->icache_data[2],
	       info->icache_data[3]);
	printk("%s" "ERROR(%d): I-cache INSN4[%016llx] INSN5[%016llx] INSN6[%016llx] INSN7[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[4],
	       info->icache_data[5],
	       info->icache_data[6],
	       info->icache_data[7]);
	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->ecache_index, info->ecache_tag);
	printk("%s" "ERROR(%d): E-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->ecache_data[0],
	       info->ecache_data[1],
	       info->ecache_data[2],
	       info->ecache_data[3]);

	/* Report any remaining error bits, highest priority first. */
	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
	while (afsr != 0UL) {
		unsigned long bit = cheetah_get_hipri(afsr);

		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
		       (recoverable ? KERN_WARNING : KERN_CRIT),
		       bit, cheetah_get_string(bit));

		afsr &= ~bit;
	}

	if (!recoverable)
		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}
1245
/* Re-read the AFSR to catch errors that arrived while reporting traps
 * were disabled.  When LOGP is non-NULL and an error bit is set, the
 * AFSR/AFAR pair is stored into it.  The observed AFSR value is then
 * written back to clear the sticky bits -- NOTE(review): relies on
 * AFSR write-one-to-clear semantics, confirm in the manual.  Returns
 * 1 if any known error bit was set, else 0.
 */
static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
	unsigned long afsr, afar;
	int ret = 0;

	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     : "=r" (afsr)
			     : "i" (ASI_AFSR));
	if ((afsr & cheetah_afsr_errors) != 0) {
		if (logp != NULL) {
			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
					     : "=r" (afar)
					     : "i" (ASI_AFAR));
			logp->afsr = afsr;
			logp->afar = afar;
		}
		ret = 1;
	}
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync\n\t"
			     : : "r" (afsr), "i" (ASI_AFSR));

	return ret;
}
1270
/* Handler for Fast-ECC error traps.  Flushes the E-cache, snapshots
 * the logged error state, re-enables the caches and error-reporting
 * traps (the "re-enable" steps imply the trap vector turned them
 * off), logs, and panics when the error is not recoverable.
 */
void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable;

	/* Flush E-cache */
	cheetah_flush_ecache();

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* No log slot; too early in boot to do anything but
		 * report via the firmware and halt.
		 */
		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	cheetah_flush_icache();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Fast-ECC error trap.\n");

	/* Flush E-cache to kick the error trap handlers out. */
	cheetah_flush_ecache();
}
1356
/* Try to fix a correctable error by pushing the line out from
 * the E-cache.  Recheck error reporting registers to see if the
 * problem is intermittent.
 *
 * Returns 0 when the scrub triggered no new error (the original
 * problem looks intermittent), 1 when the first scrub re-triggered
 * but a single retry probe came back clean, and 2 when the error
 * persists even after the retry.
 */
static int cheetah_fix_ce(unsigned long physaddr)
{
	unsigned long orig_estate;
	unsigned long alias1, alias2;
	int ret;

	/* Make sure correctable error traps are disabled. */
	__asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
			     "andn %0, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %2\n\t"
			     "membar #Sync"
			     : "=&r" (orig_estate)
			     : "i" (ESTATE_ERROR_CEEN),
			       "i" (ASI_ESTATE_ERROR_EN)
			     : "g1");

	/* We calculate alias addresses that will force the
	 * cache line in question out of the E-cache.  Then
	 * we bring it back in with an atomic instruction so
	 * that we get it in some modified/exclusive state,
	 * then we displace it again to try and get proper ECC
	 * pushed back into the system.
	 */
	physaddr &= ~(8UL - 1UL);
	alias1 = (ecache_flush_physbase +
		  (physaddr & ((ecache_flush_size >> 1) - 1)));
	alias2 = alias1 + (ecache_flush_size >> 1);
	__asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "casxa [%2] %3, %%g0, %%g0\n\t"
			     "ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (alias1), "r" (alias2),
			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));

	/* Did that trigger another error? */
	if (cheetah_recheck_errors(NULL)) {
		/* Try one more time. */
		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
				     "membar #Sync"
				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
		if (cheetah_recheck_errors(NULL))
			ret = 2;
		else
			ret = 1;
	} else {
		/* No new error, intermittent problem. */
		ret = 0;
	}

	/* Restore error enables. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));

	return ret;
}
1420
1421/* Return non-zero if PADDR is a valid physical memory address. */
1422static int cheetah_check_main_memory(unsigned long paddr)
1423{
10147570 1424 unsigned long vaddr = PAGE_OFFSET + paddr;
1da177e4 1425
13edad7a 1426 if (vaddr > (unsigned long) high_memory)
ed3ffaf7
DM
1427 return 0;
1428
10147570 1429 return kern_addr_valid(vaddr);
1da177e4
LT
1430}
1431
/* Handler for Correctable-ECC error traps.  If the AFAR points at
 * main memory and a CE is reported, attempt a scrub via
 * cheetah_fix_ce(); flush the affected caches (the trap handler only
 * disabled the I-cache), re-enable the I-cache and CE reporting, then
 * log.  Only PERR/IERR/ISAP make this unrecoverable.
 */
void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* Too early in boot for a log slot; report via the
		 * firmware and halt.
		 */
		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
		/* XXX Might want to log the results of this operation
		 * XXX somewhere... -DaveM
		 */
		cheetah_fix_ce(afar);
	}

	{
		int flush_all, flush_line;

		/* EDC/CPC alone -> one line is enough; combined with
		 * other errors -> flush the whole E-cache.
		 */
		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_CPC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
				flush_line = 1;
			else
				flush_all = 1;
		}

		/* Trap handler only disabled I-cache, flush it. */
		cheetah_flush_icache();

		/* Re-enable I-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR */
	(void) cheetah_recheck_errors(&local_snapshot);

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Correctable-ECC error trap.\n");
}
1531
/* Handler for deferred asynchronous errors.  Handles the special PCI
 * config-space poke sequence (fault consumed, execution resumes at
 * the next instruction); otherwise flushes/re-enables caches and
 * error reporting, logs, and decides recoverability: the AFAR must be
 * main memory and the trap must be from user mode or from a kernel
 * access covered by an exception table entry.  A recoverable page is
 * pinned with get_page() so it is never handed out again.
 */
void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I-cache/D-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_DC | DCU_IC)
				     : "g1");

		/* Re-enable error reporting */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_ESTATE_ERROR_EN),
				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
				     : "g1");

		/* Discard (and clear) whatever the poke latched. */
		(void) cheetah_recheck_errors(NULL);

		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* Too early in boot for a log slot; report via the
		 * firmware and halt.
		 */
		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	{
		int flush_all, flush_line;

		/* EDU/BERR alone -> one line is enough; combined with
		 * other errors -> flush the whole E-cache.
		 */
		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDU) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_BERR) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
				flush_line = 1;
			else
				flush_all = 1;
		}

		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I/D caches */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC | DCU_DC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	/* "Recoverable" here means we try to yank the page from ever
	 * being newly used again.  This depends upon a few things:
	 * 1) Must be main memory, and AFAR must be valid.
	 * 2) If we trapped from user, OK.
	 * 3) Else, if we trapped from kernel we must find exception
	 *    table entry (ie. we have to have been accessing user
	 *    space).
	 *
	 * If AFAR is not in main memory, or we trapped from kernel
	 * and cannot find an exception table entry, it is unacceptable
	 * to try and continue.
	 */
	if (recoverable && is_memory) {
		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
			/* OK, usermode access. */
			recoverable = 1;
		} else {
			const struct exception_table_entry *entry;

			entry = search_exception_tables(regs->tpc);
			if (entry) {
				/* OK, kernel access to userspace. */
				recoverable = 1;

			} else {
				/* BAD, privileged state is corrupted. */
				recoverable = 0;
			}

			if (recoverable) {
				/* Pin the bad page so it is never
				 * reused, then steer execution to the
				 * exception fixup.
				 */
				if (pfn_valid(afar >> PAGE_SHIFT))
					get_page(pfn_to_page(afar >> PAGE_SHIFT));
				else
					recoverable = 0;

				/* Only perform fixup if we still have a
				 * recoverable condition.
				 */
				if (recoverable) {
					regs->tpc = entry->fixup;
					regs->tnpc = regs->tpc + 4;
				}
			}
		}
	} else {
		recoverable = 0;
	}

	if (!recoverable)
		panic("Irrecoverable deferred error trap.\n");
}
1718
/* Handle a D/I cache parity error trap.  TYPE is encoded as:
 *
 *	Bit0: 0=dcache,1=icache
 *	Bit1: 0=recoverable,1=unrecoverable
 *
 * The hardware has disabled both the I-cache and D-cache in
 * the %dcr register.
 */
void cheetah_plus_parity_error(int type, struct pt_regs *regs)
{
	/* Clear out the faulty cache, then the D-cache in either case. */
	if (type & 0x1)
		__cheetah_flush_icache();
	else
		cheetah_plus_zap_dcache_parity();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Unrecoverable: report at emergency severity and panic. */
	if (type & 0x2) {
		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
		       smp_processor_id(),
		       (type & 0x1) ? 'I' : 'D',
		       regs->tpc);
		printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
		panic("Irrecoverable Cheetah+ parity error.");
	}

	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
	       smp_processor_id(),
	       (type & 0x1) ? 'I' : 'D',
	       regs->tpc);
	printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
}
1760
5b0c0572
DM
/* Layout of one entry in the sun4v resumable/non-resumable error
 * queues delivered by the hypervisor (consumed by sun4v_resum_error()
 * and sun4v_nonresum_error()).
 */
struct sun4v_error_entry {
	u64	err_handle;	/* opaque report handle; the kernel writes
				 * 0 here to release the entry */
	u64	err_stick;	/* NOTE(review): presumably the %stick
				 * timestamp of the report -- confirm
				 * against the sun4v spec */

	u32	err_type;
#define SUN4V_ERR_TYPE_UNDEFINED	0
#define SUN4V_ERR_TYPE_UNCORRECTED_RES	1
#define SUN4V_ERR_TYPE_PRECISE_NONRES	2
#define SUN4V_ERR_TYPE_DEFERRED_NONRES	3
#define SUN4V_ERR_TYPE_WARNING_RES	4

	u32	err_attrs;
#define SUN4V_ERR_ATTRS_PROCESSOR	0x00000001
#define SUN4V_ERR_ATTRS_MEMORY		0x00000002
#define SUN4V_ERR_ATTRS_PIO		0x00000004
#define SUN4V_ERR_ATTRS_INT_REGISTERS	0x00000008
#define SUN4V_ERR_ATTRS_FPU_REGISTERS	0x00000010
#define SUN4V_ERR_ATTRS_USER_MODE	0x01000000
#define SUN4V_ERR_ATTRS_PRIV_MODE	0x02000000
#define SUN4V_ERR_ATTRS_RES_QUEUE_FULL	0x80000000

	u64	err_raddr;	/* "raddr" per field name; printed as the
				 * error's real address */
	u32	err_size;
	u16	err_cpu;
	u16	err_pad;	/* pads the entry to an 8-byte multiple */
};

/* Queue-overflow event counts: bumped by the overflow trap handlers
 * below and reported/reset by sun4v_log_error().
 */
static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
1790
1791static const char *sun4v_err_type_to_str(u32 type)
1792{
1793 switch (type) {
1794 case SUN4V_ERR_TYPE_UNDEFINED:
1795 return "undefined";
1796 case SUN4V_ERR_TYPE_UNCORRECTED_RES:
1797 return "uncorrected resumable";
1798 case SUN4V_ERR_TYPE_PRECISE_NONRES:
1799 return "precise nonresumable";
1800 case SUN4V_ERR_TYPE_DEFERRED_NONRES:
1801 return "deferred nonresumable";
1802 case SUN4V_ERR_TYPE_WARNING_RES:
1803 return "warning resumable";
1804 default:
1805 return "unknown";
1806 };
1807}
1808
/* Print one sun4v error-queue entry ENT with prefix PFX, dump the
 * interrupted register state, and, if the overflow counter OCNT is
 * non-zero, report and reset it.
 */
static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
{
	int cnt;

	printk("%s: Reporting on cpu %d\n", pfx, cpu);
	printk("%s: err_handle[%llx] err_stick[%llx] err_type[%08x:%s]\n",
	       pfx,
	       ent->err_handle, ent->err_stick,
	       ent->err_type,
	       sun4v_err_type_to_str(ent->err_type));
	printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n",
	       pfx,
	       ent->err_attrs,
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ?
		"processor" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ?
		"memory" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ?
		"pio" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ?
		"integer-regs" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ?
		"fpu-regs" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ?
		"user" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ?
		"privileged" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ?
		"queue-full" : ""));
	printk("%s: err_raddr[%016llx] err_size[%u] err_cpu[%u]\n",
	       pfx,
	       ent->err_raddr, ent->err_size, ent->err_cpu);

	show_regs(regs);

	/* Overflow traps only bump the counter (printk there would be
	 * unsafe); account for those missed reports here.
	 */
	if ((cnt = atomic_read(ocnt)) != 0) {
		atomic_set(ocnt, 0);
		wmb();
		printk("%s: Queue overflowed %d times.\n",
		       pfx, cnt);
	}
}
1851
b4f4372f 1852/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
5b0c0572
DM
1853 * Log the event and clear the first word of the entry.
1854 */
1855void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
1856{
1857 struct sun4v_error_entry *ent, local_copy;
1858 struct trap_per_cpu *tb;
1859 unsigned long paddr;
1860 int cpu;
1861
1862 cpu = get_cpu();
1863
1864 tb = &trap_block[cpu];
1865 paddr = tb->resum_kernel_buf_pa + offset;
1866 ent = __va(paddr);
1867
1868 memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
1869
1870 /* We have a local copy now, so release the entry. */
1871 ent->err_handle = 0;
1872 wmb();
1873
1874 put_cpu();
1875
a2c1e064
DM
1876 if (ent->err_type == SUN4V_ERR_TYPE_WARNING_RES) {
1877 /* If err_type is 0x4, it's a powerdown request. Do
1878 * not do the usual resumable error log because that
1879 * makes it look like some abnormal error.
1880 */
1881 printk(KERN_INFO "Power down request...\n");
1882 kill_cad_pid(SIGINT, 1);
1883 return;
1884 }
1885
5224e6cc 1886 sun4v_log_error(regs, &local_copy, cpu,
5b0c0572
DM
1887 KERN_ERR "RESUMABLE ERROR",
1888 &sun4v_resum_oflow_cnt);
1889}
1890
/* Called when the resumable error queue overflowed.
 * If we try to printk() we'll probably make matters worse, by trying
 * to retake locks this cpu already holds or causing more errors. So
 * just bump a counter, and we'll report these counter bumps above
 * (in sun4v_log_error()).
 */
void sun4v_resum_overflow(struct pt_regs *regs)
{
	atomic_inc(&sun4v_resum_oflow_cnt);
}
1899
/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
 * Log the event, clear the first word of the entry, and die.
 *
 * The only survivable case is the special PCI config-space poke
 * sequence, which consumes the fault and resumes at the next
 * instruction instead of panicking.
 */
void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
{
	struct sun4v_error_entry *ent, local_copy;
	struct trap_per_cpu *tb;
	unsigned long paddr;
	int cpu;

	cpu = get_cpu();

	tb = &trap_block[cpu];
	paddr = tb->nonresum_kernel_buf_pa + offset;
	ent = __va(paddr);

	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));

	/* We have a local copy now, so release the entry. */
	ent->err_handle = 0;
	wmb();

	put_cpu();

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == cpu) {
		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	sun4v_log_error(regs, &local_copy, cpu,
			KERN_EMERG "NON-RESUMABLE ERROR",
			&sun4v_nonresum_oflow_cnt);

	panic("Non-resumable error.");
}
1940
/* If we try to printk() we'll probably make matters worse, by trying
 * to retake locks this cpu already holds or causing more errors. So
 * just bump a counter, and we'll report these counter bumps above.
 */
void sun4v_nonresum_overflow(struct pt_regs *regs)
{
	/* XXX Actually even this can make not that much sense.  Perhaps
	 * XXX we should just pull the plug and panic directly from here?
	 */
	atomic_inc(&sun4v_nonresum_oflow_cnt);
}
1952
6c8927c9
DM
/* ITLB-error details consumed by sun4v_itlb_error_report() —
 * presumably filled in by the trap entry code before calling it;
 * the stores are not in this file (TODO confirm against entry asm).
 */
unsigned long sun4v_err_itlb_vaddr;
unsigned long sun4v_err_itlb_ctx;
unsigned long sun4v_err_itlb_pte;
unsigned long sun4v_err_itlb_error;
1957
/* Fatal sun4v ITLB error: report trap state (and the full trap-level
 * log when taken at TL > 1), then halt into the PROM.
 */
void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
{
	if (tl > 1)
		dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
	       regs->tpc, tl);
	printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
	printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
	       (void *) regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
	       "pte[%lx] error[%lx]\n",
	       sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
	       sun4v_err_itlb_pte, sun4v_err_itlb_error);

	/* Unrecoverable: drop into the firmware. */
	prom_halt();
}
1976
/* DTLB-error details consumed by sun4v_dtlb_error_report() —
 * presumably filled in by the trap entry code before calling it;
 * the stores are not in this file (TODO confirm against entry asm).
 */
unsigned long sun4v_err_dtlb_vaddr;
unsigned long sun4v_err_dtlb_ctx;
unsigned long sun4v_err_dtlb_pte;
unsigned long sun4v_err_dtlb_error;
1981
/* Fatal sun4v DTLB error: report trap state (and the full trap-level
 * log when taken at TL > 1), then halt into the PROM.
 */
void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
{
	if (tl > 1)
		dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
	       regs->tpc, tl);
	printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
	printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
	       (void *) regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
	       "pte[%lx] error[%lx]\n",
	       sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
	       sun4v_err_dtlb_pte, sun4v_err_dtlb_error);

	/* Unrecoverable: drop into the firmware. */
	prom_halt();
}
2000
2a3a5f5d
DM
/* Report a failed TLB-related hypervisor call (error code and op). */
void hypervisor_tlbop_error(unsigned long err, unsigned long op)
{
	printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n",
	       err, op);
}
2006
/* Cross-call variant of hypervisor_tlbop_error(). */
void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
{
	printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n",
	       err, op);
}
2012
1da177e4
LT
2013void do_fpe_common(struct pt_regs *regs)
2014{
2015 if (regs->tstate & TSTATE_PRIV) {
2016 regs->tpc = regs->tnpc;
2017 regs->tnpc += 4;
2018 } else {
2019 unsigned long fsr = current_thread_info()->xfsr[0];
2020 siginfo_t info;
2021
2022 if (test_thread_flag(TIF_32BIT)) {
2023 regs->tpc &= 0xffffffff;
2024 regs->tnpc &= 0xffffffff;
2025 }
2026 info.si_signo = SIGFPE;
2027 info.si_errno = 0;
2028 info.si_addr = (void __user *)regs->tpc;
2029 info.si_trapno = 0;
2030 info.si_code = __SI_FAULT;
2031 if ((fsr & 0x1c000) == (1 << 14)) {
2032 if (fsr & 0x10)
2033 info.si_code = FPE_FLTINV;
2034 else if (fsr & 0x08)
2035 info.si_code = FPE_FLTOVF;
2036 else if (fsr & 0x04)
2037 info.si_code = FPE_FLTUND;
2038 else if (fsr & 0x02)
2039 info.si_code = FPE_FLTDIV;
2040 else if (fsr & 0x01)
2041 info.si_code = FPE_FLTRES;
2042 }
2043 force_sig_info(SIGFPE, &info, current);
2044 }
2045}
2046
2047void do_fpieee(struct pt_regs *regs)
2048{
2049 if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
2050 0, 0x24, SIGFPE) == NOTIFY_STOP)
2051 return;
2052
2053 do_fpe_common(regs);
2054}
2055
2056extern int do_mathemu(struct pt_regs *, struct fpustate *);
2057
2058void do_fpother(struct pt_regs *regs)
2059{
2060 struct fpustate *f = FPUSTATE;
2061 int ret = 0;
2062
2063 if (notify_die(DIE_TRAP, "fpu exception other", regs,
2064 0, 0x25, SIGFPE) == NOTIFY_STOP)
2065 return;
2066
2067 switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
2068 case (2 << 14): /* unfinished_FPop */
2069 case (3 << 14): /* unimplemented_FPop */
2070 ret = do_mathemu(regs, f);
2071 break;
2072 }
2073 if (ret)
2074 return;
2075 do_fpe_common(regs);
2076}
2077
2078void do_tof(struct pt_regs *regs)
2079{
2080 siginfo_t info;
2081
2082 if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
2083 0, 0x26, SIGEMT) == NOTIFY_STOP)
2084 return;
2085
2086 if (regs->tstate & TSTATE_PRIV)
2087 die_if_kernel("Penguin overflow trap from kernel mode", regs);
2088 if (test_thread_flag(TIF_32BIT)) {
2089 regs->tpc &= 0xffffffff;
2090 regs->tnpc &= 0xffffffff;
2091 }
2092 info.si_signo = SIGEMT;
2093 info.si_errno = 0;
2094 info.si_code = EMT_TAGOVF;
2095 info.si_addr = (void __user *)regs->tpc;
2096 info.si_trapno = 0;
2097 force_sig_info(SIGEMT, &info, current);
2098}
2099
2100void do_div0(struct pt_regs *regs)
2101{
2102 siginfo_t info;
2103
2104 if (notify_die(DIE_TRAP, "integer division by zero", regs,
2105 0, 0x28, SIGFPE) == NOTIFY_STOP)
2106 return;
2107
2108 if (regs->tstate & TSTATE_PRIV)
2109 die_if_kernel("TL0: Kernel divide by zero.", regs);
2110 if (test_thread_flag(TIF_32BIT)) {
2111 regs->tpc &= 0xffffffff;
2112 regs->tnpc &= 0xffffffff;
2113 }
2114 info.si_signo = SIGFPE;
2115 info.si_errno = 0;
2116 info.si_code = FPE_INTDIV;
2117 info.si_addr = (void __user *)regs->tpc;
2118 info.si_trapno = 0;
2119 force_sig_info(SIGFPE, &info, current);
2120}
2121
/* Dump the nine instruction words around a kernel pc, bracketing the
 * word at pc itself with '<' and '>'.
 */
static void instruction_dump(unsigned int *pc)
{
	int off;

	/* Instructions must be word aligned. */
	if (((unsigned long) pc) & 3)
		return;

	printk("Instruction DUMP:");
	for (off = -3; off < 6; off++) {
		char lbr = (off == 0) ? '<' : ' ';
		char rbr = (off == 0) ? '>' : ' ';

		printk("%c%08x%c", lbr, pc[off], rbr);
	}
	printk("\n");
}
2134
99cd2201 2135static void user_instruction_dump(unsigned int __user *pc)
1da177e4
LT
2136{
2137 int i;
2138 unsigned int buf[9];
2139
2140 if ((((unsigned long) pc) & 3))
2141 return;
2142
2143 if (copy_from_user(buf, pc - 3, sizeof(buf)))
2144 return;
2145
2146 printk("Instruction DUMP:");
2147 for (i = 0; i < 9; i++)
2148 printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>');
2149 printk("\n");
2150}
2151
2152void show_stack(struct task_struct *tsk, unsigned long *_ksp)
2153{
77c664fa 2154 unsigned long fp, thread_base, ksp;
c1f193a7 2155 struct thread_info *tp;
1da177e4
LT
2156 int count = 0;
2157
2158 ksp = (unsigned long) _ksp;
c1f193a7
DM
2159 if (!tsk)
2160 tsk = current;
2161 tp = task_thread_info(tsk);
2162 if (ksp == 0UL) {
2163 if (tsk == current)
2164 asm("mov %%fp, %0" : "=r" (ksp));
2165 else
2166 ksp = tp->ksp;
2167 }
1da177e4
LT
2168 if (tp == current_thread_info())
2169 flushw_all();
2170
2171 fp = ksp + STACK_BIAS;
2172 thread_base = (unsigned long) tp;
2173
4fe3ebec 2174 printk("Call Trace:\n");
1da177e4 2175 do {
14d2c68b 2176 struct sparc_stackf *sf;
77c664fa
DM
2177 struct pt_regs *regs;
2178 unsigned long pc;
2179
4f70f7a9 2180 if (!kstack_valid(tp, fp))
1da177e4 2181 break;
14d2c68b
DM
2182 sf = (struct sparc_stackf *) fp;
2183 regs = (struct pt_regs *) (sf + 1);
77c664fa 2184
4f70f7a9 2185 if (kstack_is_trap_frame(tp, regs)) {
14d2c68b
DM
2186 if (!(regs->tstate & TSTATE_PRIV))
2187 break;
77c664fa
DM
2188 pc = regs->tpc;
2189 fp = regs->u_regs[UREG_I6] + STACK_BIAS;
2190 } else {
14d2c68b
DM
2191 pc = sf->callers_pc;
2192 fp = (unsigned long)sf->fp + STACK_BIAS;
77c664fa
DM
2193 }
2194
4fe3ebec 2195 printk(" [%016lx] %pS\n", pc, (void *) pc);
1da177e4 2196 } while (++count < 16);
1da177e4
LT
2197}
2198
/* Dump the current task's kernel stack trace. */
void dump_stack(void)
{
	show_stack(current, NULL);
}

EXPORT_SYMBOL(dump_stack);
2205
2206static inline int is_kernel_stack(struct task_struct *task,
2207 struct reg_window *rw)
2208{
2209 unsigned long rw_addr = (unsigned long) rw;
2210 unsigned long thread_base, thread_end;
2211
2212 if (rw_addr < PAGE_OFFSET) {
2213 if (task != &init_task)
2214 return 0;
2215 }
2216
ee3eea16 2217 thread_base = (unsigned long) task_stack_page(task);
1da177e4
LT
2218 thread_end = thread_base + sizeof(union thread_union);
2219 if (rw_addr >= thread_base &&
2220 rw_addr < thread_end &&
2221 !(rw_addr & 0x7UL))
2222 return 1;
2223
2224 return 0;
2225}
2226
2227static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
2228{
2229 unsigned long fp = rw->ins[6];
2230
2231 if (!fp)
2232 return NULL;
2233
2234 return (struct reg_window *) (fp + STACK_BIAS);
2235}
2236
/* Oops handler: print register state, a caller backtrace, and an
 * instruction dump, then terminate the task.  Despite the name it
 * always exits here: SIGKILL for kernel-mode (TSTATE_PRIV) traps,
 * SIGSEGV otherwise.
 */
void die_if_kernel(char *str, struct pt_regs *regs)
{
	static int die_counter;
	int count = 0;

	/* Amuse the user. */
	printk(
" \\|/ ____ \\|/\n"
" \"@'/ .. \\`@\"\n"
" /_| \\__/ |_\\\n"
" \\__U_/\n");

	printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);
	notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
	/* Flush register windows so the stack walk below sees them. */
	__asm__ __volatile__("flushw");
	show_regs(regs);
	add_taint(TAINT_DIE);
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);

		/* Stop the back trace when we hit userland or we
		 * find some badly aligned kernel stack.
		 */
		while (rw &&
		       count++ < 30&&
		       is_kernel_stack(current, rw)) {
			printk("Caller[%016lx]: %pS\n", rw->ins[7],
			       (void *) rw->ins[7]);

			rw = kernel_stack_up(rw);
		}
		instruction_dump ((unsigned int *) regs->tpc);
	} else {
		if (test_thread_flag(TIF_32BIT)) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		user_instruction_dump ((unsigned int __user *) regs->tpc);
	}
	if (regs->tstate & TSTATE_PRIV)
		do_exit(SIGKILL);
	do_exit(SIGSEGV);
}
EXPORT_SYMBOL(die_if_kernel);
1da177e4 2282
6e7726e1
DM
2283#define VIS_OPCODE_MASK ((0x3 << 30) | (0x3f << 19))
2284#define VIS_OPCODE_VAL ((0x2 << 30) | (0x36 << 19))
2285
1da177e4
LT
2286extern int handle_popc(u32 insn, struct pt_regs *regs);
2287extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
2288
/* Illegal-instruction trap (tt 0x10).  Kernel-mode offenders die; for
 * user mode we first try to emulate the instruction (POPC, LDQ/STQ,
 * and on sun4v hypervisor chips VIS or other FP ops) before falling
 * back to delivering SIGILL.
 */
void do_illegal_instruction(struct pt_regs *regs)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	siginfo_t info;

	if (notify_die(DIE_TRAP, "illegal instruction", regs,
		       0, 0x10, SIGILL) == NOTIFY_STOP)
		return;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("Kernel illegal instruction", regs);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
			if (handle_popc(insn, regs))
				return;
		} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
			if (handle_ldf_stq(insn, regs))
				return;
		} else if (tlb_type == hypervisor) {
			if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) {
				/* Attempt software VIS emulation. */
				if (!vis_emul(regs, insn))
					return;
			} else {
				struct fpustate *f = FPUSTATE;

				/* XXX maybe verify XFSR bits like
				 * XXX do_fpother() does?
				 */
				if (do_mathemu(regs, f))
					return;
			}
		}
	}
	/* Emulation failed or not applicable: SIGILL to the task. */
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = (void __user *)pc;
	info.si_trapno = 0;
	force_sig_info(SIGILL, &info, current);
}
2333
ed6b0b45
DM
2334extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
2335
1da177e4
LT
2336void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
2337{
2338 siginfo_t info;
2339
2340 if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2341 0, 0x34, SIGSEGV) == NOTIFY_STOP)
2342 return;
2343
2344 if (regs->tstate & TSTATE_PRIV) {
ed6b0b45 2345 kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
1da177e4
LT
2346 return;
2347 }
2348 info.si_signo = SIGBUS;
2349 info.si_errno = 0;
2350 info.si_code = BUS_ADRALN;
2351 info.si_addr = (void __user *)sfar;
2352 info.si_trapno = 0;
2353 force_sig_info(SIGBUS, &info, current);
2354}
2355
9f8a5b84 2356void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
ed6b0b45
DM
2357{
2358 siginfo_t info;
2359
2360 if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2361 0, 0x34, SIGSEGV) == NOTIFY_STOP)
2362 return;
2363
2364 if (regs->tstate & TSTATE_PRIV) {
2365 kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2366 return;
2367 }
2368 info.si_signo = SIGBUS;
2369 info.si_errno = 0;
2370 info.si_code = BUS_ADRALN;
2371 info.si_addr = (void __user *) addr;
2372 info.si_trapno = 0;
2373 force_sig_info(SIGBUS, &info, current);
2374}
2375
1da177e4
LT
2376void do_privop(struct pt_regs *regs)
2377{
2378 siginfo_t info;
2379
2380 if (notify_die(DIE_TRAP, "privileged operation", regs,
2381 0, 0x11, SIGILL) == NOTIFY_STOP)
2382 return;
2383
2384 if (test_thread_flag(TIF_32BIT)) {
2385 regs->tpc &= 0xffffffff;
2386 regs->tnpc &= 0xffffffff;
2387 }
2388 info.si_signo = SIGILL;
2389 info.si_errno = 0;
2390 info.si_code = ILL_PRVOPC;
2391 info.si_addr = (void __user *)regs->tpc;
2392 info.si_trapno = 0;
2393 force_sig_info(SIGILL, &info, current);
2394}
2395
/* Privileged-action trap: handled exactly like a privileged opcode. */
void do_privact(struct pt_regs *regs)
{
	do_privop(regs);
}
2400
/* Trap level 1 stuff or other traps we should never see... */
/* TL0 cache error exception: fatal. */
void do_cee(struct pt_regs *regs)
{
	die_if_kernel("TL0: Cache Error Exception", regs);
}
2406
/* TL1 cache error exception: dump the trap-level log, then oops. */
void do_cee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Cache Error Exception", regs);
}
2412
/* TL1 data access exception: dump the trap-level log, then oops. */
void do_dae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Data Access Exception", regs);
}
2418
/* TL1 instruction access exception: dump the trap-level log, then oops. */
void do_iae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Instruction Access Exception", regs);
}
2424
/* TL1 divide-by-zero: dump the trap-level log, then oops. */
void do_div0_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: DIV0 Exception", regs);
}
2430
/* TL1 FPU-disabled trap: dump the trap-level log, then oops. */
void do_fpdis_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Disabled", regs);
}
2436
/* TL1 IEEE FP exception: dump the trap-level log, then oops. */
void do_fpieee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU IEEE Exception", regs);
}
2442
/* TL1 "FP exception other": dump the trap-level log, then oops. */
void do_fpother_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Other Exception", regs);
}
2448
/* TL1 illegal instruction: dump the trap-level log, then oops. */
void do_ill_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Illegal Instruction Exception", regs);
}
2454
/* IRQ taken at TL1: dump the trap-level log, then oops. */
void do_irq_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: IRQ Exception", regs);
}
2460
/* TL1 LDDF misalignment: dump the trap-level log, then oops. */
void do_lddfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: LDDF Exception", regs);
}
2466
/* TL1 STDF misalignment: dump the trap-level log, then oops. */
void do_stdfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: STDF Exception", regs);
}
2472
/* TL0 physical-address watchpoint: fatal. */
void do_paw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
}
2477
/* TL1 physical-address watchpoint: dump the trap-level log, then oops. */
void do_paw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
}
2483
/* TL0 virtual-address watchpoint: fatal. */
void do_vaw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
}
2488
/* TL1 virtual-address watchpoint: dump the trap-level log, then oops. */
void do_vaw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
}
2494
/* TL1 tag overflow: dump the trap-level log, then oops. */
void do_tof_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Tag Overflow Exception", regs);
}
2500
/* Emulate a %psr read: return the PSR image derived from %tstate in
 * %o0 and step past the trapping instruction.
 */
void do_getpsr(struct pt_regs *regs)
{
	regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
}
2511
/* Per-cpu trap state, indexed by cpu id.  Field offsets are mirrored
 * by the TRAP_PER_CPU_* constants, compile-time checked in trap_init().
 */
struct trap_per_cpu trap_block[NR_CPUS];
EXPORT_SYMBOL(trap_block);
56fb4df6
DM
2514
2515/* This can get invoked before sched_init() so play it super safe
2516 * and use hard_smp_processor_id().
2517 */
9843099f 2518void notrace init_cur_cpu_trap(struct thread_info *t)
56fb4df6
DM
2519{
2520 int cpu = hard_smp_processor_id();
2521 struct trap_per_cpu *p = &trap_block[cpu];
2522
72aff53f 2523 p->thread = t;
56fb4df6
DM
2524 p->pgd_paddr = 0;
2525}
2526
1da177e4 2527extern void thread_info_offsets_are_bolixed_dave(void);
56fb4df6 2528extern void trap_per_cpu_offsets_are_bolixed_dave(void);
dcc1e8dd 2529extern void tsb_config_offsets_are_bolixed_dave(void);
1da177e4
LT
2530
/* Only invoked on boot processor. */
void __init trap_init(void)
{
	/* Compile time sanity check.  The *_offsets constants below are
	 * used by code outside this file and must stay in sync with the
	 * C structure layouts; BUILD_BUG_ON fails the build on drift.
	 */
	BUILD_BUG_ON(TI_TASK != offsetof(struct thread_info, task) ||
		     TI_FLAGS != offsetof(struct thread_info, flags) ||
		     TI_CPU != offsetof(struct thread_info, cpu) ||
		     TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
		     TI_KSP != offsetof(struct thread_info, ksp) ||
		     TI_FAULT_ADDR != offsetof(struct thread_info,
					       fault_address) ||
		     TI_KREGS != offsetof(struct thread_info, kregs) ||
		     TI_UTRAPS != offsetof(struct thread_info, utraps) ||
		     TI_EXEC_DOMAIN != offsetof(struct thread_info,
						exec_domain) ||
		     TI_REG_WINDOW != offsetof(struct thread_info,
					       reg_window) ||
		     TI_RWIN_SPTRS != offsetof(struct thread_info,
					       rwbuf_stkptrs) ||
		     TI_GSR != offsetof(struct thread_info, gsr) ||
		     TI_XFSR != offsetof(struct thread_info, xfsr) ||
		     TI_PRE_COUNT != offsetof(struct thread_info,
					      preempt_count) ||
		     TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
		     TI_SYS_NOERROR != offsetof(struct thread_info,
						syscall_noerror) ||
		     TI_RESTART_BLOCK != offsetof(struct thread_info,
						  restart_block) ||
		     TI_KUNA_REGS != offsetof(struct thread_info,
					      kern_una_regs) ||
		     TI_KUNA_INSN != offsetof(struct thread_info,
					      kern_una_insn) ||
		     TI_FPREGS != offsetof(struct thread_info, fpregs) ||
		     (TI_FPREGS & (64 - 1)));

	/* Same check for struct trap_per_cpu offsets. */
	BUILD_BUG_ON(TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu,
						     thread) ||
		     (TRAP_PER_CPU_PGD_PADDR !=
		      offsetof(struct trap_per_cpu, pgd_paddr)) ||
		     (TRAP_PER_CPU_CPU_MONDO_PA !=
		      offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
		     (TRAP_PER_CPU_DEV_MONDO_PA !=
		      offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
		     (TRAP_PER_CPU_RESUM_MONDO_PA !=
		      offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
		     (TRAP_PER_CPU_RESUM_KBUF_PA !=
		      offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
		     (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
		      offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
		     (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
		      offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
		     (TRAP_PER_CPU_FAULT_INFO !=
		      offsetof(struct trap_per_cpu, fault_info)) ||
		     (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
		      offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
		     (TRAP_PER_CPU_CPU_LIST_PA !=
		      offsetof(struct trap_per_cpu, cpu_list_pa)) ||
		     (TRAP_PER_CPU_TSB_HUGE !=
		      offsetof(struct trap_per_cpu, tsb_huge)) ||
		     (TRAP_PER_CPU_TSB_HUGE_TEMP !=
		      offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
		     (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
		      offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
		     (TRAP_PER_CPU_CPU_MONDO_QMASK !=
		      offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
		     (TRAP_PER_CPU_DEV_MONDO_QMASK !=
		      offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
		     (TRAP_PER_CPU_RESUM_QMASK !=
		      offsetof(struct trap_per_cpu, resum_qmask)) ||
		     (TRAP_PER_CPU_NONRESUM_QMASK !=
		      offsetof(struct trap_per_cpu, nonresum_qmask)) ||
		     (TRAP_PER_CPU_PER_CPU_BASE !=
		      offsetof(struct trap_per_cpu, __per_cpu_base)));

	/* Same check for struct tsb_config offsets. */
	BUILD_BUG_ON((TSB_CONFIG_TSB !=
		      offsetof(struct tsb_config, tsb)) ||
		     (TSB_CONFIG_RSS_LIMIT !=
		      offsetof(struct tsb_config, tsb_rss_limit)) ||
		     (TSB_CONFIG_NENTRIES !=
		      offsetof(struct tsb_config, tsb_nentries)) ||
		     (TSB_CONFIG_REG_VAL !=
		      offsetof(struct tsb_config, tsb_reg_val)) ||
		     (TSB_CONFIG_MAP_VADDR !=
		      offsetof(struct tsb_config, tsb_map_vaddr)) ||
		     (TSB_CONFIG_MAP_PTE !=
		      offsetof(struct tsb_config, tsb_map_pte)));

	/* Attach to the address space of init_task.  On SMP we
	 * do this in smp.c:smp_callin for other cpus.
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
}