[SPARC64]: Disable smp_report_regs() for now.
/* $Id: traps.c,v 1.85 2002/02/09 19:49:31 davem Exp $
 * arch/sparc64/kernel/traps.c
 *
 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
 */

/*
 * I like traps on v9, :))))
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>	/* for jiffies */
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/mm.h>
#include <linux/init.h>

#include <asm/delay.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/oplib.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/uaccess.h>
#include <asm/fpumacro.h>
#include <asm/lsu.h>
#include <asm/dcu.h>
#include <asm/estate.h>
#include <asm/chafsr.h>
#include <asm/sfafsr.h>
#include <asm/psrcompat.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/kdebug.h>
#include <asm/head.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif

struct notifier_block *sparc64die_chain;
static DEFINE_SPINLOCK(die_notifier_lock);

int register_die_notifier(struct notifier_block *nb)
{
	int err = 0;
	unsigned long flags;

	spin_lock_irqsave(&die_notifier_lock, flags);
	err = notifier_chain_register(&sparc64die_chain, nb);
	spin_unlock_irqrestore(&die_notifier_lock, flags);
	return err;
}
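
/* Illustrative only, not part of the original file: a minimal sketch of
 * how a client (e.g. a debugger module) might hook the die chain above.
 * The names my_die_handler/my_nb are hypothetical; the callback signature
 * is the generic one from <linux/notifier.h>, and val carries the DIE_*
 * code that notify_die() was invoked with.
 */
#if 0
static int my_die_handler(struct notifier_block *self,
			  unsigned long val, void *data)
{
	/* data points at the struct die_args built by notify_die(). */
	return NOTIFY_DONE;
}

static struct notifier_block my_nb = {
	.notifier_call	= my_die_handler,
};

/* In module init: register_die_notifier(&my_nb); */
#endif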

/* When an irrecoverable trap occurs at tl > 0, the trap entry
 * code logs the trap state registers at every level in the trap
 * stack.  It is found at (pt_regs + sizeof(pt_regs)) and the layout
 * is as follows:
 */
struct tl1_traplog {
	struct {
		unsigned long tstate;
		unsigned long tpc;
		unsigned long tnpc;
		unsigned long tt;
	} trapstack[4];
	unsigned long tl;
};
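
/* Illustrative only, not part of the original file: per the comment
 * above, the log sits immediately after the saved pt_regs, so the TL>0
 * handlers below all locate it with the same pointer arithmetic:
 */
#if 0
static struct tl1_traplog *example_traplog(struct pt_regs *regs)
{
	return (struct tl1_traplog *)(regs + 1);
}
#endif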

static void dump_tl1_traplog(struct tl1_traplog *p)
{
	int i, limit;

	printk("TRAPLOG: Error at trap level 0x%lx, dumping trap stack.\n",
	       p->tl);

	limit = (tlb_type == hypervisor) ? 2 : 4;
	for (i = 0; i < limit; i++) {
		printk(KERN_CRIT
		       "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
		       "TNPC[%016lx] TT[%lx]\n",
		       i + 1,
		       p->trapstack[i].tstate, p->trapstack[i].tpc,
		       p->trapstack[i].tnpc, p->trapstack[i].tt);
	}
}

void do_call_debug(struct pt_regs *regs)
{
	notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT);
}

void bad_trap(struct pt_regs *regs, long lvl)
{
	char buffer[32];
	siginfo_t info;

	if (notify_die(DIE_TRAP, "bad trap", regs,
		       0, lvl, SIGTRAP) == NOTIFY_STOP)
		return;

	if (lvl < 0x100) {
		sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
		die_if_kernel(buffer, regs);
	}

	lvl -= 0x100;
	if (regs->tstate & TSTATE_PRIV) {
		sprintf(buffer, "Kernel bad sw trap %lx", lvl);
		die_if_kernel(buffer, regs);
	}
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLTRP;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = lvl;
	force_sig_info(SIGILL, &info, current);
}

void bad_trap_tl1(struct pt_regs *regs, long lvl)
{
	char buffer[32];

	if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
		       0, lvl, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	sprintf(buffer, "Bad trap %lx at tl>0", lvl);
	die_if_kernel(buffer, regs);
}

#ifdef CONFIG_DEBUG_BUGVERBOSE
void do_BUG(const char *file, int line)
{
	bust_spinlocks(1);
	printk("kernel BUG at %s:%d!\n", file, line);
}
#endif

void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "instruction access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		printk("spitfire_insn_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Iax", regs);
	}
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}

void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	spitfire_insn_access_exception(regs, sfsr, sfar);
}

void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	unsigned short type = (type_ctx >> 16);
	unsigned short ctx = (type_ctx & 0xffff);
	siginfo_t info;

	if (notify_die(DIE_TRAP, "instruction access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		printk("sun4v_insn_access_exception: ADDR[%016lx] "
		       "CTX[%04x] TYPE[%04x], going.\n",
		       addr, ctx, type);
		die_if_kernel("Iax", regs);
	}

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}

void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	sun4v_insn_access_exception(regs, addr, type_ctx);
}

void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* Test if this comes from uaccess places. */
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			/* Ouch, somebody is trying VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
			printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
			       regs->tpc, entry->fixup);
#endif
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
		/* Shit... */
		printk("spitfire_data_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Dax", regs);
	}

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}

void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	spitfire_data_access_exception(regs, sfsr, sfar);
}

void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	unsigned short type = (type_ctx >> 16);
	unsigned short ctx = (type_ctx & 0xffff);
	siginfo_t info;

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		printk("sun4v_data_access_exception: ADDR[%016lx] "
		       "CTX[%04x] TYPE[%04x], going.\n",
		       addr, ctx, type);
		die_if_kernel("Dax", regs);
	}

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}

void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	sun4v_data_access_exception(regs, addr, type_ctx);
}

#ifdef CONFIG_PCI
/* This is really pathetic... */
extern volatile int pci_poke_in_progress;
extern volatile int pci_poke_cpu;
extern volatile int pci_poke_faulted;
#endif

/* When access exceptions happen, we must do this. */
static void spitfire_clean_and_reenable_l1_caches(void)
{
	unsigned long va;

	if (tlb_type != spitfire)
		BUG();

	/* Clean 'em. */
	for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
		spitfire_put_icache_tag(va, 0x0);
		spitfire_put_dcache_tag(va, 0x0);
	}

	/* Re-enable in LSU. */
	__asm__ __volatile__("flush %%g6\n\t"
			     "membar #Sync\n\t"
			     "stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
				    LSU_CONTROL_IM | LSU_CONTROL_DM),
			       "i" (ASI_LSU_CONTROL)
			     : "memory");
}
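
/* Illustrative only, not part of the original file: assuming the 8K
 * base page size used by sparc64, the loop above walks PAGE_SIZE << 1 =
 * 16K of diagnostic tag space in 32-byte line-sized steps, enough to
 * clear every tag in Spitfire's 16K L1 instruction and data caches.
 */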

static void spitfire_enable_estate_errors(void)
{
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (ESTATE_ERR_ALL),
			       "i" (ASI_ESTATE_ERROR_EN));
}

static char ecc_syndrome_table[] = {
	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};
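
/* Illustrative only, not part of the original file: the low eight bits
 * of a UDB error register index the table above directly, e.g. a UDBL
 * value whose low byte is 0x03 yields scode = ecc_syndrome_table[0x03]
 * = 0x48.  The real lookup is spitfire_log_udb_syndrome() below, which
 * feeds the resulting code to prom_getunumber() to name the failing
 * memory module.
 */
#if 0
static unsigned short example_udb_scode(unsigned long udb_reg)
{
	return ecc_syndrome_table[udb_reg & 0xff];
}
#endif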

static char *syndrome_unknown = "<Unknown>";

static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
{
	unsigned short scode;
	char memmod_str[64], *p;

	if (udbl & bit) {
		scode = ecc_syndrome_table[udbl & 0xff];
		if (prom_getunumber(scode, afar,
				    memmod_str, sizeof(memmod_str)) == -1)
			p = syndrome_unknown;
		else
			p = memmod_str;
		printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
		       "Memory Module \"%s\"\n",
		       smp_processor_id(), scode, p);
	}

	if (udbh & bit) {
		scode = ecc_syndrome_table[udbh & 0xff];
		if (prom_getunumber(scode, afar,
				    memmod_str, sizeof(memmod_str)) == -1)
			p = syndrome_unknown;
		else
			p = memmod_str;
		printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
		       "Memory Module \"%s\"\n",
		       smp_processor_id(), scode, p);
	}
}

static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
{
	printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
	       "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
	       smp_processor_id(), afsr, afar, udbl, udbh, tl1);

	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);

	/* We always log it, even if someone is listening for this
	 * trap.
	 */
	notify_die(DIE_TRAP, "Correctable ECC Error", regs,
		   0, TRAP_TYPE_CEE, SIGTRAP);

	/* The Correctable ECC Error trap does not disable I/D caches.  So
	 * we only have to restore the ESTATE Error Enable register.
	 */
	spitfire_enable_estate_errors();
}

static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
{
	siginfo_t info;

	printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
	       "AFAR[%lx] UDBL[%lx] UDBH[%lx] TT[%lx] TL>1[%d]\n",
	       smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);

	/* XXX add more human friendly logging of the error status
	 * XXX as is implemented for cheetah
	 */

	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);

	/* We always log it, even if someone is listening for this
	 * trap.
	 */
	notify_die(DIE_TRAP, "Uncorrectable Error", regs,
		   0, tt, SIGTRAP);

	if (regs->tstate & TSTATE_PRIV) {
		if (tl1)
			dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
		die_if_kernel("UE", regs);
	}

	/* XXX need more intelligent processing here, such as is implemented
	 * XXX for cheetah errors, in fact if the E-cache still holds the
	 * XXX line with bad parity this will loop
	 */

	spitfire_clean_and_reenable_l1_caches();
	spitfire_enable_estate_errors();

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_OBJERR;
	info.si_addr = (void *)0;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}

void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
{
	unsigned long afsr, tt, udbh, udbl;
	int tl1;

	afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
	tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
	tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
	udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
	udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;

#ifdef CONFIG_PCI
	if (tt == TRAP_TYPE_DAE &&
	    pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		spitfire_clean_and_reenable_l1_caches();
		spitfire_enable_estate_errors();

		pci_poke_faulted = 1;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	if (afsr & SFAFSR_UE)
		spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);

	if (tt == TRAP_TYPE_CEE) {
		/* Handle the case where we took a CEE trap, but ACK'd
		 * only the UE state in the UDB error registers.
		 */
		if (afsr & SFAFSR_UE) {
			if (udbh & UDBE_CE) {
				__asm__ __volatile__(
					"stxa %0, [%1] %2\n\t"
					"membar #Sync"
					: /* no outputs */
					: "r" (udbh & UDBE_CE),
					  "r" (0x0), "i" (ASI_UDB_ERROR_W));
			}
			if (udbl & UDBE_CE) {
				__asm__ __volatile__(
					"stxa %0, [%1] %2\n\t"
					"membar #Sync"
					: /* no outputs */
					: "r" (udbl & UDBE_CE),
					  "r" (0x18), "i" (ASI_UDB_ERROR_W));
			}
		}

		spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
	}
}

int cheetah_pcache_forced_on;

void cheetah_enable_pcache(void)
{
	unsigned long dcr;

	printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
	       smp_processor_id());

	__asm__ __volatile__("ldxa [%%g0] %1, %0"
			     : "=r" (dcr)
			     : "i" (ASI_DCU_CONTROL_REG));
	dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
}

/* Cheetah error trap handling. */
static unsigned long ecache_flush_physbase;
static unsigned long ecache_flush_linesize;
static unsigned long ecache_flush_size;

/* WARNING: The error trap handlers in assembly know the precise
 * layout of the following structure.
 *
 * C-level handlers below use this information to log the error
 * and then determine how to recover (if possible).
 */
struct cheetah_err_info {
/*0x00*/u64 afsr;
/*0x08*/u64 afar;

	/* D-cache state */
/*0x10*/u64 dcache_data[4];	/* The actual data	*/
/*0x30*/u64 dcache_index;	/* D-cache index	*/
/*0x38*/u64 dcache_tag;		/* D-cache tag/valid	*/
/*0x40*/u64 dcache_utag;	/* D-cache microtag	*/
/*0x48*/u64 dcache_stag;	/* D-cache snooptag	*/

	/* I-cache state */
/*0x50*/u64 icache_data[8];	/* The actual insns + predecode	*/
/*0x90*/u64 icache_index;	/* I-cache index	*/
/*0x98*/u64 icache_tag;		/* I-cache phys tag	*/
/*0xa0*/u64 icache_utag;	/* I-cache microtag	*/
/*0xa8*/u64 icache_stag;	/* I-cache snooptag	*/
/*0xb0*/u64 icache_upper;	/* I-cache upper-tag	*/
/*0xb8*/u64 icache_lower;	/* I-cache lower-tag	*/

	/* E-cache state */
/*0xc0*/u64 ecache_data[4];	/* 32 bytes from staging registers	*/
/*0xe0*/u64 ecache_index;	/* E-cache index	*/
/*0xe8*/u64 ecache_tag;		/* E-cache tag/state	*/

/*0xf0*/u64 __pad[32 - 30];
};
#define CHAFSR_INVALID		((u64)-1L)

/* This table is ordered in priority of errors and matches the
 * AFAR overwrite policy as well.
 */

struct afsr_error_table {
	unsigned long mask;
	const char *name;
};

static const char CHAFSR_PERR_msg[] =
	"System interface protocol error";
static const char CHAFSR_IERR_msg[] =
	"Internal processor error";
static const char CHAFSR_ISAP_msg[] =
	"System request parity error on incoming address";
static const char CHAFSR_UCU_msg[] =
	"Uncorrectable E-cache ECC error for ifetch/data";
static const char CHAFSR_UCC_msg[] =
	"SW Correctable E-cache ECC error for ifetch/data";
static const char CHAFSR_UE_msg[] =
	"Uncorrectable system bus data ECC error for read";
static const char CHAFSR_EDU_msg[] =
	"Uncorrectable E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMU_msg[] =
	"Uncorrectable system bus MTAG error";
static const char CHAFSR_WDU_msg[] =
	"Uncorrectable E-cache ECC error for writeback";
static const char CHAFSR_CPU_msg[] =
	"Uncorrectable ECC error for copyout";
static const char CHAFSR_CE_msg[] =
	"HW corrected system bus data ECC error for read";
static const char CHAFSR_EDC_msg[] =
	"HW corrected E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMC_msg[] =
	"HW corrected system bus MTAG ECC error";
static const char CHAFSR_WDC_msg[] =
	"HW corrected E-cache ECC error for writeback";
static const char CHAFSR_CPC_msg[] =
	"HW corrected ECC error for copyout";
static const char CHAFSR_TO_msg[] =
	"Unmapped error from system bus";
static const char CHAFSR_BERR_msg[] =
	"Bus error response from system bus";
static const char CHAFSR_IVC_msg[] =
	"HW corrected system bus data ECC error for ivec read";
static const char CHAFSR_IVU_msg[] =
	"Uncorrectable system bus data ECC error for ivec read";
static struct afsr_error_table __cheetah_error_table[] = {
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
static const char CHPAFSR_DTO_msg[] =
	"System bus unmapped error for prefetch/storequeue-read";
static const char CHPAFSR_DBERR_msg[] =
	"System bus error for prefetch/storequeue-read";
static const char CHPAFSR_THCE_msg[] =
	"Hardware corrected E-cache Tag ECC error";
static const char CHPAFSR_TSCE_msg[] =
	"SW handled correctable E-cache Tag ECC error";
static const char CHPAFSR_TUE_msg[] =
	"Uncorrectable E-cache Tag ECC error";
static const char CHPAFSR_DUE_msg[] =
	"System bus uncorrectable data ECC error due to prefetch/store-fill";
static struct afsr_error_table __cheetah_plus_error_table[] = {
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	{	CHPAFSR_DTO,	CHPAFSR_DTO_msg		},
	{	CHPAFSR_DBERR,	CHPAFSR_DBERR_msg	},
	{	CHPAFSR_THCE,	CHPAFSR_THCE_msg	},
	{	CHPAFSR_TSCE,	CHPAFSR_TSCE_msg	},
	{	CHPAFSR_TUE,	CHPAFSR_TUE_msg		},
	{	CHPAFSR_DUE,	CHPAFSR_DUE_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
static const char JPAFSR_JETO_msg[] =
	"System interface protocol error, hw timeout caused";
static const char JPAFSR_SCE_msg[] =
	"Parity error on system snoop results";
static const char JPAFSR_JEIC_msg[] =
	"System interface protocol error, illegal command detected";
static const char JPAFSR_JEIT_msg[] =
	"System interface protocol error, illegal ADTYPE detected";
static const char JPAFSR_OM_msg[] =
	"Out of range memory error has occurred";
static const char JPAFSR_ETP_msg[] =
	"Parity error on L2 cache tag SRAM";
static const char JPAFSR_UMS_msg[] =
	"Error due to unsupported store";
static const char JPAFSR_RUE_msg[] =
	"Uncorrectable ECC error from remote cache/memory";
static const char JPAFSR_RCE_msg[] =
	"Correctable ECC error from remote cache/memory";
static const char JPAFSR_BP_msg[] =
	"JBUS parity error on returned read data";
static const char JPAFSR_WBP_msg[] =
	"JBUS parity error on data for writeback or block store";
static const char JPAFSR_FRC_msg[] =
	"Foreign read to DRAM incurring correctable ECC error";
static const char JPAFSR_FRU_msg[] =
	"Foreign read to DRAM incurring uncorrectable ECC error";
static struct afsr_error_table __jalapeno_error_table[] = {
	{	JPAFSR_JETO,	JPAFSR_JETO_msg		},
	{	JPAFSR_SCE,	JPAFSR_SCE_msg		},
	{	JPAFSR_JEIC,	JPAFSR_JEIC_msg		},
	{	JPAFSR_JEIT,	JPAFSR_JEIT_msg		},
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	JPAFSR_OM,	JPAFSR_OM_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	JPAFSR_ETP,	JPAFSR_ETP_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	{	JPAFSR_UMS,	JPAFSR_UMS_msg		},
	{	JPAFSR_RUE,	JPAFSR_RUE_msg		},
	{	JPAFSR_RCE,	JPAFSR_RCE_msg		},
	{	JPAFSR_BP,	JPAFSR_BP_msg		},
	{	JPAFSR_WBP,	JPAFSR_WBP_msg		},
	{	JPAFSR_FRC,	JPAFSR_FRC_msg		},
	{	JPAFSR_FRU,	JPAFSR_FRU_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
static struct afsr_error_table *cheetah_error_table;
static unsigned long cheetah_afsr_errors;

/* This is allocated at boot time based upon the largest hardware
 * cpu ID in the system.  We allocate two entries per cpu, one for
 * TL==0 logging and one for TL >= 1 logging.
 */
struct cheetah_err_info *cheetah_error_log;

static __inline__ struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
{
	struct cheetah_err_info *p;
	int cpu = smp_processor_id();

	if (!cheetah_error_log)
		return NULL;

	p = cheetah_error_log + (cpu * 2);
	if ((afsr & CHAFSR_TL1) != 0UL)
		p++;

	return p;
}
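
/* Illustrative only, not part of the original file: with two entries
 * per cpu, cpu N's TL==0 slot is cheetah_error_log[2 * N] and its
 * TL >= 1 slot is cheetah_error_log[2 * N + 1]; e.g. an AFSR with
 * CHAFSR_TL1 set on cpu 2 selects entry 5.
 */
#if 0
static struct cheetah_err_info *example_log_slot(int cpu, int at_tl1)
{
	return cheetah_error_log + (cpu * 2) + (at_tl1 ? 1 : 0);
}
#endif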

extern unsigned int tl0_icpe[], tl1_icpe[];
extern unsigned int tl0_dcpe[], tl1_dcpe[];
extern unsigned int tl0_fecc[], tl1_fecc[];
extern unsigned int tl0_cee[], tl1_cee[];
extern unsigned int tl0_iae[], tl1_iae[];
extern unsigned int tl0_dae[], tl1_dae[];
extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];

void __init cheetah_ecache_flush_init(void)
{
	unsigned long largest_size, smallest_linesize, order, ver;
	int node, i, instance;

	/* Scan all cpu device tree nodes, note two values:
	 * 1) largest E-cache size
	 * 2) smallest E-cache line size
	 */
	largest_size = 0UL;
	smallest_linesize = ~0UL;

	instance = 0;
	while (!cpu_find_by_instance(instance, &node, NULL)) {
		unsigned long val;

		val = prom_getintdefault(node, "ecache-size",
					 (2 * 1024 * 1024));
		if (val > largest_size)
			largest_size = val;
		val = prom_getintdefault(node, "ecache-line-size", 64);
		if (val < smallest_linesize)
			smallest_linesize = val;
		instance++;
	}

	if (largest_size == 0UL || smallest_linesize == ~0UL) {
		prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
			    "parameters.\n");
		prom_halt();
	}

	ecache_flush_size = (2 * largest_size);
	ecache_flush_linesize = smallest_linesize;

	ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);

	if (ecache_flush_physbase == ~0UL) {
		prom_printf("cheetah_ecache_flush_init: Cannot find %ld byte "
			    "contiguous physical memory.\n",
			    ecache_flush_size);
		prom_halt();
	}

	/* Now allocate error trap reporting scoreboard. */
	node = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
	for (order = 0; order < MAX_ORDER; order++) {
		if ((PAGE_SIZE << order) >= node)
			break;
	}
	cheetah_error_log = (struct cheetah_err_info *)
		__get_free_pages(GFP_KERNEL, order);
	if (!cheetah_error_log) {
		prom_printf("cheetah_ecache_flush_init: Failed to allocate "
			    "error logging scoreboard (%d bytes).\n", node);
		prom_halt();
	}
	memset(cheetah_error_log, 0, PAGE_SIZE << order);

	/* Mark all AFSRs as invalid so that the trap handler will
	 * log new information there.
	 */
	for (i = 0; i < 2 * NR_CPUS; i++)
		cheetah_error_log[i].afsr = CHAFSR_INVALID;

	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	if ((ver >> 32) == __JALAPENO_ID ||
	    (ver >> 32) == __SERRANO_ID) {
		cheetah_error_table = &__jalapeno_error_table[0];
		cheetah_afsr_errors = JPAFSR_ERRORS;
	} else if ((ver >> 32) == 0x003e0015) {
		cheetah_error_table = &__cheetah_plus_error_table[0];
		cheetah_afsr_errors = CHPAFSR_ERRORS;
	} else {
		cheetah_error_table = &__cheetah_error_table[0];
		cheetah_afsr_errors = CHAFSR_ERRORS;
	}

	/* Now patch trap tables. */
	memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
	memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
	memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
	memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
	memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
	memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
	memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
	memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
	if (tlb_type == cheetah_plus) {
		memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
		memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
		memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
		memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
	}
	flushi(PAGE_OFFSET);
}

static void cheetah_flush_ecache(void)
{
	unsigned long flush_base = ecache_flush_physbase;
	unsigned long flush_linesize = ecache_flush_linesize;
	unsigned long flush_size = ecache_flush_size;

	__asm__ __volatile__("1: subcc	%0, %4, %0\n\t"
			     "   bne,pt	%%xcc, 1b\n\t"
			     "    ldxa	[%2 + %0] %3, %%g0\n\t"
			     : "=&r" (flush_size)
			     : "0" (flush_size), "r" (flush_base),
			       "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
}

static void cheetah_flush_ecache_line(unsigned long physaddr)
{
	unsigned long alias;

	physaddr &= ~(8UL - 1UL);
	physaddr = (ecache_flush_physbase +
		    (physaddr & ((ecache_flush_size >> 1UL) - 1UL)));
	alias = physaddr + (ecache_flush_size >> 1UL);
	__asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
			     "ldxa [%1] %2, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (physaddr), "r" (alias),
			       "i" (ASI_PHYS_USE_EC));
}
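
/* Illustrative only, not part of the original file: since
 * cheetah_ecache_flush_init() sized the flush area at twice the largest
 * E-cache, a line at physical address P is displaced by reading
 * ecache_flush_physbase + (P & (ecache_flush_size/2 - 1)) and the same
 * offset plus ecache_flush_size/2 -- two flush-area addresses that
 * index the same E-cache set as P.
 */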

/* Unfortunately, the diagnostic access to the I-cache tags we need to
 * use to clear the thing interferes with I-cache coherency transactions.
 *
 * So we must only flush the I-cache when it is disabled.
 */
static void __cheetah_flush_icache(void)
{
	unsigned int icache_size, icache_line_size;
	unsigned long addr;

	icache_size = local_cpu_data().icache_size;
	icache_line_size = local_cpu_data().icache_line_size;

	/* Clear the valid bits in all the tags. */
	for (addr = 0; addr < icache_size; addr += icache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr | (2 << 3)),
				       "i" (ASI_IC_TAG));
	}
}

static void cheetah_flush_icache(void)
{
	unsigned long dcu_save;

	/* Save current DCU, disable I-cache. */
	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     "or %0, %2, %%g1\n\t"
			     "stxa %%g1, [%%g0] %1\n\t"
			     "membar #Sync"
			     : "=r" (dcu_save)
			     : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
			     : "g1");

	__cheetah_flush_icache();

	/* Restore DCU register */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
}

static void cheetah_flush_dcache(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr), "i" (ASI_DCACHE_TAG));
	}
}

/* In order to make the even parity correct we must do two things.
 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
 * Next, we clear out all 32-bytes of data for that line.  Data of
 * all-zero + tag parity value of zero == correct parity.
 */
static void cheetah_plus_zap_dcache_parity(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		unsigned long tag = (addr >> 14);
		unsigned long line;

		__asm__ __volatile__("membar	#Sync\n\t"
				     "stxa	%0, [%1] %2\n\t"
				     "membar	#Sync"
				     : /* no outputs */
				     : "r" (tag), "r" (addr),
				       "i" (ASI_DCACHE_UTAG));
		for (line = addr; line < addr + dcache_line_size; line += 8)
			__asm__ __volatile__("membar	#Sync\n\t"
					     "stxa	%%g0, [%0] %1\n\t"
					     "membar	#Sync"
					     : /* no outputs */
					     : "r" (line),
					       "i" (ASI_DCACHE_DATA));
	}
}

/* Conversion tables used to frob Cheetah AFSR syndrome values into
 * something palatable to the memory controller driver get_unumber
 * routine.
 */
#define MT0	137
#define MT1	138
#define MT2	139
#define NONE	254
#define MTC0	140
#define MTC1	141
#define MTC2	142
#define MTC3	143
#define C0	128
#define C1	129
#define C2	130
#define C3	131
#define C4	132
#define C5	133
#define C6	134
#define C7	135
#define C8	136
#define M2	144
#define M3	145
#define M4	146
#define M	147
static unsigned char cheetah_ecc_syntab[] = {
/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
};
static unsigned char cheetah_mtag_syntab[] = {
	NONE, MTC0,
	MTC1, NONE,
	MTC2, NONE,
	NONE, MT0,
	MTC3, NONE,
	NONE, MT1,
	NONE, MT2,
	NONE, NONE
};
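
/* Illustrative only, not part of the original file: cheetah_log_errors()
 * below extracts the E- or M-syndrome field from the AFSR, translates it
 * through one of these tables, and hands the result to chmc_getunumber()
 * to name the failing DIMM.  For instance, an E-syndrome of 0x03 maps to
 * M2, which the table appears to use for multi-bit error patterns that
 * cannot be pinned on a single data bit.
 */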

/* Return the highest priority error condition mentioned. */
static __inline__ unsigned long cheetah_get_hipri(unsigned long afsr)
{
	unsigned long tmp = 0;
	int i;

	for (i = 0; cheetah_error_table[i].mask; i++) {
		if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
			return tmp;
	}
	return tmp;
}

static const char *cheetah_get_string(unsigned long bit)
{
	int i;

	for (i = 0; cheetah_error_table[i].mask; i++) {
		if ((bit & cheetah_error_table[i].mask) != 0UL)
			return cheetah_error_table[i].name;
	}
	return "???";
}

extern int chmc_getunumber(int, unsigned long, char *, int);

static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
			       unsigned long afsr, unsigned long afar, int recoverable)
{
	unsigned long hipri;
	char unum[256];

	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       afsr, afar,
	       (afsr & CHAFSR_TL1) ? 1 : 0);
	printk("%s" "ERROR(%d): TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       regs->tpc, regs->tnpc, regs->tstate);
	printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
	hipri = cheetah_get_hipri(afsr);
	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       hipri, cheetah_get_string(hipri));

	/* Try to get unumber if relevant. */
#define ESYND_ERRORS	(CHAFSR_IVC | CHAFSR_IVU | \
			 CHAFSR_CPC | CHAFSR_CPU | \
			 CHAFSR_UE  | CHAFSR_CE  | \
			 CHAFSR_EDC | CHAFSR_EDU | \
			 CHAFSR_UCC | CHAFSR_UCU | \
			 CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS	(CHAFSR_EMC | CHAFSR_EMU)
	if (afsr & ESYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
		syndrome = cheetah_ecc_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	} else if (afsr & MSYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
		syndrome = cheetah_mtag_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	}

	/* Now dump the cache snapshots. */
	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->dcache_index,
	       info->dcache_tag,
	       info->dcache_utag,
	       info->dcache_stag);
	printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->dcache_data[0],
	       info->dcache_data[1],
	       info->dcache_data[2],
	       info->dcache_data[3]);
	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
	       "u[%016lx] l[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->icache_index,
	       info->icache_tag,
	       info->icache_utag,
	       info->icache_stag,
	       info->icache_upper,
	       info->icache_lower);
	printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[0],
	       info->icache_data[1],
	       info->icache_data[2],
	       info->icache_data[3]);
	printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[4],
	       info->icache_data[5],
	       info->icache_data[6],
	       info->icache_data[7]);
	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->ecache_index, info->ecache_tag);
	printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->ecache_data[0],
	       info->ecache_data[1],
	       info->ecache_data[2],
	       info->ecache_data[3]);

	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
	while (afsr != 0UL) {
		unsigned long bit = cheetah_get_hipri(afsr);

		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
		       (recoverable ? KERN_WARNING : KERN_CRIT),
		       bit, cheetah_get_string(bit));

		afsr &= ~bit;
	}

	if (!recoverable)
		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}

static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
	unsigned long afsr, afar;
	int ret = 0;

	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     : "=r" (afsr)
			     : "i" (ASI_AFSR));
	if ((afsr & cheetah_afsr_errors) != 0) {
		if (logp != NULL) {
			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
					     : "=r" (afar)
					     : "i" (ASI_AFAR));
			logp->afsr = afsr;
			logp->afar = afar;
		}
		ret = 1;
	}
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync\n\t"
			     : : "r" (afsr), "i" (ASI_AFSR));

	return ret;
}

void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable;

	/* Flush E-cache */
	cheetah_flush_ecache();

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	cheetah_flush_icache();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Fast-ECC error trap.\n");

	/* Flush E-cache to kick the error trap handlers out. */
	cheetah_flush_ecache();
}

/* Try to fix a correctable error by pushing the line out from
 * the E-cache.  Recheck error reporting registers to see if the
 * problem is intermittent.
 */
static int cheetah_fix_ce(unsigned long physaddr)
{
	unsigned long orig_estate;
	unsigned long alias1, alias2;
	int ret;

	/* Make sure correctable error traps are disabled. */
	__asm__ __volatile__("ldxa	[%%g0] %2, %0\n\t"
			     "andn	%0, %1, %%g1\n\t"
			     "stxa	%%g1, [%%g0] %2\n\t"
			     "membar	#Sync"
			     : "=&r" (orig_estate)
			     : "i" (ESTATE_ERROR_CEEN),
			       "i" (ASI_ESTATE_ERROR_EN)
			     : "g1");

	/* We calculate alias addresses that will force the
	 * cache line in question out of the E-cache.  Then
	 * we bring it back in with an atomic instruction so
	 * that we get it in some modified/exclusive state,
	 * then we displace it again to try and get proper ECC
	 * pushed back into the system.
	 */
	physaddr &= ~(8UL - 1UL);
	alias1 = (ecache_flush_physbase +
		  (physaddr & ((ecache_flush_size >> 1) - 1)));
	alias2 = alias1 + (ecache_flush_size >> 1);
	__asm__ __volatile__("ldxa	[%0] %3, %%g0\n\t"
			     "ldxa	[%1] %3, %%g0\n\t"
			     "casxa	[%2] %3, %%g0, %%g0\n\t"
			     "membar	#StoreLoad | #StoreStore\n\t"
			     "ldxa	[%0] %3, %%g0\n\t"
			     "ldxa	[%1] %3, %%g0\n\t"
			     "membar	#Sync"
			     : /* no outputs */
			     : "r" (alias1), "r" (alias2),
			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));

	/* Did that trigger another error? */
	if (cheetah_recheck_errors(NULL)) {
		/* Try one more time. */
		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
				     "membar #Sync"
				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
		if (cheetah_recheck_errors(NULL))
			ret = 2;
		else
			ret = 1;
	} else {
		/* No new error, intermittent problem. */
		ret = 0;
	}

	/* Restore error enables. */
	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
			     "membar	#Sync"
			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));

	return ret;
}
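
/* Illustrative only, not part of the original file: the return value of
 * cheetah_fix_ce() distinguishes three outcomes -- 0: no error recurred
 * (an intermittent problem), 1: the error recurred once and the extra
 * displacement load cleared it, 2: the error persisted even after the
 * retry.  Callers currently ignore the distinction (see the XXX note in
 * cheetah_cee_handler() below).
 */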

/* Return non-zero if PADDR is a valid physical memory address. */
static int cheetah_check_main_memory(unsigned long paddr)
{
	unsigned long vaddr = PAGE_OFFSET + paddr;

	if (vaddr > (unsigned long) high_memory)
		return 0;

	return kern_addr_valid(vaddr);
}
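
/* Illustrative only, not part of the original file: physical memory is
 * linearly mapped at PAGE_OFFSET on sparc64, so the check above simply
 * translates paddr into its kernel virtual alias and asks
 * kern_addr_valid() whether that alias is backed by a real page; an
 * AFAR pointing outside RAM fails the test and is treated as
 * non-memory by the handlers below.
 */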

void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
		/* XXX Might want to log the results of this operation
		 * XXX somewhere... -DaveM
		 */
		cheetah_fix_ce(afar);
	}

	{
		int flush_all, flush_line;

		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_CPC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
				flush_line = 1;
			else
				flush_all = 1;
		}

		/* Trap handler only disabled I-cache, flush it. */
		cheetah_flush_icache();

		/* Re-enable I-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR */
	(void) cheetah_recheck_errors(&local_snapshot);

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Correctable-ECC error trap.\n");
}

void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I-cache/D-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_DC | DCU_IC)
				     : "g1");

		/* Re-enable error reporting */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_ESTATE_ERROR_EN),
				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
				     : "g1");

		(void) cheetah_recheck_errors(NULL);

		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	{
		int flush_all, flush_line;

		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDU) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_BERR) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
				flush_line = 1;
			else
				flush_all = 1;
		}

		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I/D caches */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC | DCU_DC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	/* "Recoverable" here means we try to yank the page from ever
	 * being newly used again.  This depends upon a few things:
	 * 1) Must be main memory, and AFAR must be valid.
	 * 2) If we trapped from user, OK.
	 * 3) Else, if we trapped from kernel we must find exception
	 *    table entry (ie. we have to have been accessing user
	 *    space).
	 *
	 * If AFAR is not in main memory, or we trapped from kernel
	 * and cannot find an exception table entry, it is unacceptable
	 * to try and continue.
	 */
	if (recoverable && is_memory) {
		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
			/* OK, usermode access. */
			recoverable = 1;
		} else {
			const struct exception_table_entry *entry;

			entry = search_exception_tables(regs->tpc);
			if (entry) {
				/* OK, kernel access to userspace. */
				recoverable = 1;
			} else {
				/* BAD, privileged state is corrupted. */
				recoverable = 0;
			}

			if (recoverable) {
				if (pfn_valid(afar >> PAGE_SHIFT))
					get_page(pfn_to_page(afar >> PAGE_SHIFT));
				else
					recoverable = 0;

				/* Only perform fixup if we still have a
				 * recoverable condition.
				 */
				if (recoverable) {
					regs->tpc = entry->fixup;
					regs->tnpc = regs->tpc + 4;
				}
			}
		}
	} else {
		recoverable = 0;
	}

	if (!recoverable)
		panic("Irrecoverable deferred error trap.\n");
}
1710
1711/* Handle a D/I cache parity error trap. TYPE is encoded as:
1712 *
1713 * Bit0: 0=dcache,1=icache
1714 * Bit1: 0=recoverable,1=unrecoverable
1715 *
1716 * The hardware has disabled both the I-cache and D-cache in
1717 * the %dcr register.
1718 */
1719void cheetah_plus_parity_error(int type, struct pt_regs *regs)
1720{
1721 if (type & 0x1)
1722 __cheetah_flush_icache();
1723 else
1724 cheetah_plus_zap_dcache_parity();
1725 cheetah_flush_dcache();
1726
1727 /* Re-enable I-cache/D-cache */
1728 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1729 "or %%g1, %1, %%g1\n\t"
1730 "stxa %%g1, [%%g0] %0\n\t"
1731 "membar #Sync"
1732 : /* no outputs */
1733 : "i" (ASI_DCU_CONTROL_REG),
1734 "i" (DCU_DC | DCU_IC)
1735 : "g1");
1736
1737 if (type & 0x2) {
1738 printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1739 smp_processor_id(),
1740 (type & 0x1) ? 'I' : 'D',
1741 regs->tpc);
1742 panic("Irrecoverable Cheetah+ parity error.");
1743 }
1744
1745 printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1746 smp_processor_id(),
1747 (type & 0x1) ? 'I' : 'D',
1748 regs->tpc);
1749}
1750
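/* Layout of a single entry in the sun4v resumable and non-resumable
 * error queues, as filled in by the hypervisor.
 */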
struct sun4v_error_entry {
	u64		err_handle;
	u64		err_stick;

	u32		err_type;
#define SUN4V_ERR_TYPE_UNDEFINED	0
#define SUN4V_ERR_TYPE_UNCORRECTED_RES	1
#define SUN4V_ERR_TYPE_PRECISE_NONRES	2
#define SUN4V_ERR_TYPE_DEFERRED_NONRES	3
#define SUN4V_ERR_TYPE_WARNING_RES	4

	u32		err_attrs;
#define SUN4V_ERR_ATTRS_PROCESSOR	0x00000001
#define SUN4V_ERR_ATTRS_MEMORY		0x00000002
#define SUN4V_ERR_ATTRS_PIO		0x00000004
#define SUN4V_ERR_ATTRS_INT_REGISTERS	0x00000008
#define SUN4V_ERR_ATTRS_FPU_REGISTERS	0x00000010
#define SUN4V_ERR_ATTRS_USER_MODE	0x01000000
#define SUN4V_ERR_ATTRS_PRIV_MODE	0x02000000
#define SUN4V_ERR_ATTRS_RES_QUEUE_FULL	0x80000000

	u64		err_raddr;
	u32		err_size;
	u16		err_cpu;
	u16		err_pad;
};

static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);

static const char *sun4v_err_type_to_str(u32 type)
{
	switch (type) {
	case SUN4V_ERR_TYPE_UNDEFINED:
		return "undefined";
	case SUN4V_ERR_TYPE_UNCORRECTED_RES:
		return "uncorrected resumable";
	case SUN4V_ERR_TYPE_PRECISE_NONRES:
		return "precise nonresumable";
	case SUN4V_ERR_TYPE_DEFERRED_NONRES:
		return "deferred nonresumable";
	case SUN4V_ERR_TYPE_WARNING_RES:
		return "warning resumable";
	default:
		return "unknown";
	}
}

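/* Pretty-print one saved queue entry; also report and clear the
 * overflow count for the queue it came from.
 */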
static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
{
	int cnt;

	printk("%s: Reporting on cpu %d\n", pfx, cpu);
	printk("%s: err_handle[%lx] err_stick[%lx] err_type[%08x:%s]\n",
	       pfx,
	       ent->err_handle, ent->err_stick,
	       ent->err_type,
	       sun4v_err_type_to_str(ent->err_type));
	printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n",
	       pfx,
	       ent->err_attrs,
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ?
		"processor" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ?
		"memory" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ?
		"pio" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ?
		"integer-regs" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ?
		"fpu-regs" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ?
		"user" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ?
		"privileged" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ?
		"queue-full" : ""));
	printk("%s: err_raddr[%016lx] err_size[%u] err_cpu[%u]\n",
	       pfx,
	       ent->err_raddr, ent->err_size, ent->err_cpu);

	if ((cnt = atomic_read(ocnt)) != 0) {
		atomic_set(ocnt, 0);
		wmb();
		printk("%s: Queue overflowed %d times.\n",
		       pfx, cnt);
	}
}

/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
 * Log the event and clear the first word of the entry.
 */
void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
{
	struct sun4v_error_entry *ent, local_copy;
	struct trap_per_cpu *tb;
	unsigned long paddr;
	int cpu;

	cpu = get_cpu();

	tb = &trap_block[cpu];
	paddr = tb->resum_kernel_buf_pa + offset;
	ent = __va(paddr);

	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));

	/* We have a local copy now, so release the entry.  */
	ent->err_handle = 0;
	wmb();

	put_cpu();

	sun4v_log_error(&local_copy, cpu,
			KERN_ERR "RESUMABLE ERROR",
			&sun4v_resum_oflow_cnt);
}

/* If we try to printk() we'll probably make matters worse, by trying
 * to retake locks this cpu already holds or causing more errors. So
 * just bump a counter, and we'll report these counter bumps above.
 */
void sun4v_resum_overflow(struct pt_regs *regs)
{
	atomic_inc(&sun4v_resum_oflow_cnt);
}

/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
 * Log the event, clear the first word of the entry, and die.
 */
void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
{
	struct sun4v_error_entry *ent, local_copy;
	struct trap_per_cpu *tb;
	unsigned long paddr;
	int cpu;

	cpu = get_cpu();

	tb = &trap_block[cpu];
	paddr = tb->nonresum_kernel_buf_pa + offset;
	ent = __va(paddr);

	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));

	/* We have a local copy now, so release the entry.  */
	ent->err_handle = 0;
	wmb();

	put_cpu();

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == cpu) {
		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	sun4v_log_error(&local_copy, cpu,
			KERN_EMERG "NON-RESUMABLE ERROR",
			&sun4v_nonresum_oflow_cnt);

	panic("Non-resumable error.");
}

/* If we try to printk() we'll probably make matters worse, by trying
 * to retake locks this cpu already holds or causing more errors. So
 * just bump a counter, and we'll report these counter bumps above.
 */
void sun4v_nonresum_overflow(struct pt_regs *regs)
{
	/* XXX Actually even this can make not that much sense.  Perhaps
	 * XXX we should just pull the plug and panic directly from here?
	 */
	atomic_inc(&sun4v_nonresum_oflow_cnt);
}

void do_fpe_common(struct pt_regs *regs)
{
	if (regs->tstate & TSTATE_PRIV) {
		regs->tpc = regs->tnpc;
		regs->tnpc += 4;
	} else {
		unsigned long fsr = current_thread_info()->xfsr[0];
		siginfo_t info;

		if (test_thread_flag(TIF_32BIT)) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void __user *)regs->tpc;
		info.si_trapno = 0;
		info.si_code = __SI_FAULT;
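		/* FSR bits 16:14 hold the floating-point trap type (ftt);
		 * ftt == 1 means an IEEE 754 exception, and the cexc bits
		 * 4:0 (nv, of, uf, dz, nx) identify the condition.
		 */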
		if ((fsr & 0x1c000) == (1 << 14)) {
			if (fsr & 0x10)
				info.si_code = FPE_FLTINV;
			else if (fsr & 0x08)
				info.si_code = FPE_FLTOVF;
			else if (fsr & 0x04)
				info.si_code = FPE_FLTUND;
			else if (fsr & 0x02)
				info.si_code = FPE_FLTDIV;
			else if (fsr & 0x01)
				info.si_code = FPE_FLTRES;
		}
		force_sig_info(SIGFPE, &info, current);
	}
}

void do_fpieee(struct pt_regs *regs)
{
	if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
		       0, 0x24, SIGFPE) == NOTIFY_STOP)
		return;

	do_fpe_common(regs);
}

extern int do_mathemu(struct pt_regs *, struct fpustate *);

void do_fpother(struct pt_regs *regs)
{
	struct fpustate *f = FPUSTATE;
	int ret = 0;

	if (notify_die(DIE_TRAP, "fpu exception other", regs,
		       0, 0x25, SIGFPE) == NOTIFY_STOP)
		return;

	switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
	case (2 << 14): /* unfinished_FPop */
	case (3 << 14): /* unimplemented_FPop */
		ret = do_mathemu(regs, f);
		break;
	}
	if (ret)
		return;
	do_fpe_common(regs);
}

void do_tof(struct pt_regs *regs)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
		       0, 0x26, SIGEMT) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV)
		die_if_kernel("Penguin overflow trap from kernel mode", regs);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGEMT;
	info.si_errno = 0;
	info.si_code = EMT_TAGOVF;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGEMT, &info, current);
}

void do_div0(struct pt_regs *regs)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "integer division by zero", regs,
		       0, 0x28, SIGFPE) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV)
		die_if_kernel("TL0: Kernel divide by zero.", regs);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = FPE_INTDIV;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGFPE, &info, current);
}

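/* Dump the instructions around the faulting PC, bracketing the
 * faulting instruction itself with '<' and '>'.
 */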
void instruction_dump (unsigned int *pc)
{
	int i;

	if ((((unsigned long) pc) & 3))
		return;

	printk("Instruction DUMP:");
	for (i = -3; i < 6; i++)
		printk("%c%08x%c", i ? ' ' : '<', pc[i], i ? ' ' : '>');
	printk("\n");
}

static void user_instruction_dump (unsigned int __user *pc)
{
	int i;
	unsigned int buf[9];

	if ((((unsigned long) pc) & 3))
		return;

	if (copy_from_user(buf, pc - 3, sizeof(buf)))
		return;

	printk("Instruction DUMP:");
	for (i = 0; i < 9; i++)
		printk("%c%08x%c", i == 3 ? '<' : ' ', buf[i], i == 3 ? '>' : ' ');
	printk("\n");
}

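/* Walk the kernel call chain of TSK.  On sparc64 the saved frame
 * pointers are biased by STACK_BIAS, so the bias is added back before
 * each register window is dereferenced.
 */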
void show_stack(struct task_struct *tsk, unsigned long *_ksp)
{
	unsigned long pc, fp, thread_base, ksp;
	void *tp = task_stack_page(tsk);
	struct reg_window *rw;
	int count = 0;

	ksp = (unsigned long) _ksp;

	if (tp == current_thread_info())
		flushw_all();

	fp = ksp + STACK_BIAS;
	thread_base = (unsigned long) tp;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	do {
		/* Bogus frame pointer? */
		if (fp < (thread_base + sizeof(struct thread_info)) ||
		    fp >= (thread_base + THREAD_SIZE))
			break;
		rw = (struct reg_window *)fp;
		pc = rw->ins[7];
		printk(" [%016lx] ", pc);
		print_symbol("%s\n", pc);
		fp = rw->ins[6] + STACK_BIAS;
	} while (++count < 16);
#ifndef CONFIG_KALLSYMS
	printk("\n");
#endif
}

void dump_stack(void)
{
	unsigned long *ksp;

	__asm__ __volatile__("mov %%fp, %0"
			     : "=r" (ksp));
	show_stack(current, ksp);
}

EXPORT_SYMBOL(dump_stack);

static inline int is_kernel_stack(struct task_struct *task,
				  struct reg_window *rw)
{
	unsigned long rw_addr = (unsigned long) rw;
	unsigned long thread_base, thread_end;

	if (rw_addr < PAGE_OFFSET) {
		if (task != &init_task)
			return 0;
	}

	thread_base = (unsigned long) task_stack_page(task);
	thread_end = thread_base + sizeof(union thread_union);
	if (rw_addr >= thread_base &&
	    rw_addr < thread_end &&
	    !(rw_addr & 0x7UL))
		return 1;

	return 0;
}

static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
{
	unsigned long fp = rw->ins[6];

	if (!fp)
		return NULL;

	return (struct reg_window *) (fp + STACK_BIAS);
}

void die_if_kernel(char *str, struct pt_regs *regs)
{
	static int die_counter;
	extern void __show_regs(struct pt_regs * regs);
	extern void smp_report_regs(void);
	int count = 0;

	/* Amuse the user. */
	printk(
"              \\|/ ____ \\|/\n"
"              \"@'/ .. \\`@\"\n"
"              /_| \\__/ |_\\\n"
"                 \\__U_/\n");

	printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter);
	notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
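	/* Spill all live register windows to the stack so the frame
	 * chain walked below is actually visible in memory.
	 */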
	__asm__ __volatile__("flushw");
	__show_regs(regs);
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);

		/* Stop the back trace when we hit userland or we
		 * find some badly aligned kernel stack.
		 */
		while (rw &&
		       count++ < 30 &&
		       is_kernel_stack(current, rw)) {
			printk("Caller[%016lx]", rw->ins[7]);
			print_symbol(": %s", rw->ins[7]);
			printk("\n");

			rw = kernel_stack_up(rw);
		}
		instruction_dump ((unsigned int *) regs->tpc);
	} else {
		if (test_thread_flag(TIF_32BIT)) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		user_instruction_dump ((unsigned int __user *) regs->tpc);
	}
#ifdef CONFIG_SMP
	smp_report_regs();
#endif

	if (regs->tstate & TSTATE_PRIV)
		do_exit(SIGKILL);
	do_exit(SIGSEGV);
}

extern int handle_popc(u32 insn, struct pt_regs *regs);
extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);

void do_illegal_instruction(struct pt_regs *regs)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	siginfo_t info;

	if (notify_die(DIE_TRAP, "illegal instruction", regs,
		       0, 0x10, SIGILL) == NOTIFY_STOP)
		return;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("Kernel illegal instruction", regs);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
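	/* Two instruction forms that trap as illegal are emulated in
	 * software rather than signalled: POPC, which these chips do not
	 * implement in hardware, and the quad FP load/store forms.
	 */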
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
			if (handle_popc(insn, regs))
				return;
		} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
			if (handle_ldf_stq(insn, regs))
				return;
		}
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = (void __user *)pc;
	info.si_trapno = 0;
	force_sig_info(SIGILL, &info, current);
}

extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);

void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
		return;

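	/* Kernel-mode unaligned accesses are fixed up by the in-kernel
	 * emulator; only user mode gets a SIGBUS.
	 */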
	if (regs->tstate & TSTATE_PRIV) {
		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
		return;
	}
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = (void __user *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}

void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
2256{
2257 siginfo_t info;
2258
2259 if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2260 0, 0x34, SIGSEGV) == NOTIFY_STOP)
2261 return;
2262
2263 if (regs->tstate & TSTATE_PRIV) {
2264 kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2265 return;
2266 }
2267 info.si_signo = SIGBUS;
2268 info.si_errno = 0;
2269 info.si_code = BUS_ADRALN;
2270 info.si_addr = (void __user *) addr;
2271 info.si_trapno = 0;
2272 force_sig_info(SIGBUS, &info, current);
2273}
2274
void do_privop(struct pt_regs *regs)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "privileged operation", regs,
		       0, 0x11, SIGILL) == NOTIFY_STOP)
		return;

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_PRVOPC;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGILL, &info, current);
}

void do_privact(struct pt_regs *regs)
{
	do_privop(regs);
}

/* Trap level 1 stuff or other traps we should never see... */
void do_cee(struct pt_regs *regs)
{
	die_if_kernel("TL0: Cache Error Exception", regs);
}

void do_cee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Cache Error Exception", regs);
}

void do_dae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Data Access Exception", regs);
}

void do_iae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Instruction Access Exception", regs);
}

void do_div0_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: DIV0 Exception", regs);
}

void do_fpdis_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Disabled", regs);
}

void do_fpieee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU IEEE Exception", regs);
}

void do_fpother_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Other Exception", regs);
}

void do_ill_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Illegal Instruction Exception", regs);
}

void do_irq_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: IRQ Exception", regs);
}

void do_lddfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: LDDF Exception", regs);
}

void do_stdfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: STDF Exception", regs);
}

void do_paw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
}

void do_paw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
}

void do_vaw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
}

void do_vaw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
}

void do_tof_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Tag Overflow Exception", regs);
}

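/* Software trap used by 32-bit tasks to read a PSR image: synthesize
 * it from TSTATE and step past the trap instruction.
 */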
void do_getpsr(struct pt_regs *regs)
{
	regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
}

struct trap_per_cpu trap_block[NR_CPUS];

/* This can get invoked before sched_init() so play it super safe
 * and use hard_smp_processor_id().
 */
void init_cur_cpu_trap(void)
{
	int cpu = hard_smp_processor_id();
	struct trap_per_cpu *p = &trap_block[cpu];

	p->thread = current_thread_info();
	p->pgd_paddr = 0;
}

extern void thread_info_offsets_are_bolixed_dave(void);
extern void trap_per_cpu_offsets_are_bolixed_dave(void);

/* Only invoked on boot processor. */
void __init trap_init(void)
{
	/* Compile time sanity check. */
	if (TI_TASK != offsetof(struct thread_info, task) ||
	    TI_FLAGS != offsetof(struct thread_info, flags) ||
	    TI_CPU != offsetof(struct thread_info, cpu) ||
	    TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
	    TI_KSP != offsetof(struct thread_info, ksp) ||
	    TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
	    TI_KREGS != offsetof(struct thread_info, kregs) ||
	    TI_UTRAPS != offsetof(struct thread_info, utraps) ||
	    TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
	    TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
	    TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
	    TI_GSR != offsetof(struct thread_info, gsr) ||
	    TI_XFSR != offsetof(struct thread_info, xfsr) ||
	    TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
	    TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
	    TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
	    TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
	    TI_PCR != offsetof(struct thread_info, pcr_reg) ||
	    TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
	    TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
	    TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
	    TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
	    TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
	    TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
	    TI_FPREGS != offsetof(struct thread_info, fpregs) ||
	    (TI_FPREGS & (64 - 1)))
		thread_info_offsets_are_bolixed_dave();

	if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) ||
	    (TRAP_PER_CPU_PGD_PADDR !=
	     offsetof(struct trap_per_cpu, pgd_paddr)) ||
	    (TRAP_PER_CPU_CPU_MONDO_PA !=
	     offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
	    (TRAP_PER_CPU_DEV_MONDO_PA !=
	     offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
	    (TRAP_PER_CPU_RESUM_MONDO_PA !=
	     offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
	    (TRAP_PER_CPU_RESUM_KBUF_PA !=
	     offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
	    (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
	     offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
	    (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
	     offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
	    (TRAP_PER_CPU_FAULT_INFO !=
	     offsetof(struct trap_per_cpu, fault_info)) ||
	    (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
	     offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
	    (TRAP_PER_CPU_CPU_LIST_PA !=
	     offsetof(struct trap_per_cpu, cpu_list_pa)))
		trap_per_cpu_offsets_are_bolixed_dave();

	/* Attach to the address space of init_task.  On SMP we
	 * do this in smp.c:smp_callin for other cpus.
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
}