/* arch/sparc64/kernel/traps.c
 *
 * Copyright (C) 1995,1997 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
 */

/*
 * I like traps on v9, :))))
 */
1da177e4 11#include <linux/module.h>
a2c1e064 12#include <linux/sched.h>
1da177e4
LT
13#include <linux/kernel.h>
14#include <linux/kallsyms.h>
15#include <linux/signal.h>
16#include <linux/smp.h>
1da177e4
LT
17#include <linux/mm.h>
18#include <linux/init.h>
1eeb66a1 19#include <linux/kdebug.h>
1da177e4 20
2f4dfe20 21#include <asm/smp.h>
1da177e4
LT
22#include <asm/delay.h>
23#include <asm/system.h>
24#include <asm/ptrace.h>
25#include <asm/oplib.h>
26#include <asm/page.h>
27#include <asm/pgtable.h>
28#include <asm/unistd.h>
29#include <asm/uaccess.h>
30#include <asm/fpumacro.h>
31#include <asm/lsu.h>
32#include <asm/dcu.h>
33#include <asm/estate.h>
34#include <asm/chafsr.h>
6c52a96e 35#include <asm/sfafsr.h>
1da177e4
LT
36#include <asm/psrcompat.h>
37#include <asm/processor.h>
38#include <asm/timer.h>
92704a1c 39#include <asm/head.h>
1da177e4
LT
40#ifdef CONFIG_KMOD
41#include <linux/kmod.h>
42#endif
07f8e5f3 43#include <asm/prom.h>
1da177e4 44
99cd2201 45#include "entry.h"
1da177e4
LT
46
47/* When an irrecoverable trap occurs at tl > 0, the trap entry
48 * code logs the trap state registers at every level in the trap
49 * stack. It is found at (pt_regs + sizeof(pt_regs)) and the layout
50 * is as follows:
51 */
52struct tl1_traplog {
53 struct {
54 unsigned long tstate;
55 unsigned long tpc;
56 unsigned long tnpc;
57 unsigned long tt;
58 } trapstack[4];
59 unsigned long tl;
60};
61
62static void dump_tl1_traplog(struct tl1_traplog *p)
63{
3d6395cb 64 int i, limit;
1da177e4 65
04d74758
DM
66 printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, "
67 "dumping track stack.\n", p->tl);
3d6395cb
DM
68
69 limit = (tlb_type == hypervisor) ? 2 : 4;
39334a4b 70 for (i = 0; i < limit; i++) {
04d74758 71 printk(KERN_EMERG
1da177e4
LT
72 "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
73 "TNPC[%016lx] TT[%lx]\n",
74 i + 1,
75 p->trapstack[i].tstate, p->trapstack[i].tpc,
76 p->trapstack[i].tnpc, p->trapstack[i].tt);
5af47db7 77 print_symbol("TRAPLOG: TPC<%s>\n", p->trapstack[i].tpc);
1da177e4
LT
78 }
79}
80
1da177e4
LT
81void bad_trap(struct pt_regs *regs, long lvl)
82{
83 char buffer[32];
84 siginfo_t info;
85
86 if (notify_die(DIE_TRAP, "bad trap", regs,
87 0, lvl, SIGTRAP) == NOTIFY_STOP)
88 return;
89
90 if (lvl < 0x100) {
91 sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
92 die_if_kernel(buffer, regs);
93 }
94
95 lvl -= 0x100;
96 if (regs->tstate & TSTATE_PRIV) {
97 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
98 die_if_kernel(buffer, regs);
99 }
100 if (test_thread_flag(TIF_32BIT)) {
101 regs->tpc &= 0xffffffff;
102 regs->tnpc &= 0xffffffff;
103 }
104 info.si_signo = SIGILL;
105 info.si_errno = 0;
106 info.si_code = ILL_ILLTRP;
107 info.si_addr = (void __user *)regs->tpc;
108 info.si_trapno = lvl;
109 force_sig_info(SIGILL, &info, current);
110}
111
112void bad_trap_tl1(struct pt_regs *regs, long lvl)
113{
114 char buffer[32];
115
116 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
117 0, lvl, SIGTRAP) == NOTIFY_STOP)
118 return;
119
120 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
121
122 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
123 die_if_kernel (buffer, regs);
124}
125
126#ifdef CONFIG_DEBUG_BUGVERBOSE
127void do_BUG(const char *file, int line)
128{
129 bust_spinlocks(1);
130 printk("kernel BUG at %s:%d!\n", file, line);
131}
132#endif
133
6c52a96e 134void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
1da177e4
LT
135{
136 siginfo_t info;
137
138 if (notify_die(DIE_TRAP, "instruction access exception", regs,
139 0, 0x8, SIGTRAP) == NOTIFY_STOP)
140 return;
141
142 if (regs->tstate & TSTATE_PRIV) {
6c52a96e
DM
143 printk("spitfire_insn_access_exception: SFSR[%016lx] "
144 "SFAR[%016lx], going.\n", sfsr, sfar);
1da177e4
LT
145 die_if_kernel("Iax", regs);
146 }
147 if (test_thread_flag(TIF_32BIT)) {
148 regs->tpc &= 0xffffffff;
149 regs->tnpc &= 0xffffffff;
150 }
151 info.si_signo = SIGSEGV;
152 info.si_errno = 0;
153 info.si_code = SEGV_MAPERR;
154 info.si_addr = (void __user *)regs->tpc;
155 info.si_trapno = 0;
156 force_sig_info(SIGSEGV, &info, current);
157}
158
6c52a96e 159void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
1da177e4
LT
160{
161 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
162 0, 0x8, SIGTRAP) == NOTIFY_STOP)
163 return;
164
165 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
6c52a96e 166 spitfire_insn_access_exception(regs, sfsr, sfar);
1da177e4
LT
167}
168
ed6b0b45
DM
169void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
170{
171 unsigned short type = (type_ctx >> 16);
172 unsigned short ctx = (type_ctx & 0xffff);
173 siginfo_t info;
174
175 if (notify_die(DIE_TRAP, "instruction access exception", regs,
176 0, 0x8, SIGTRAP) == NOTIFY_STOP)
177 return;
178
179 if (regs->tstate & TSTATE_PRIV) {
180 printk("sun4v_insn_access_exception: ADDR[%016lx] "
181 "CTX[%04x] TYPE[%04x], going.\n",
182 addr, ctx, type);
183 die_if_kernel("Iax", regs);
184 }
185
186 if (test_thread_flag(TIF_32BIT)) {
187 regs->tpc &= 0xffffffff;
188 regs->tnpc &= 0xffffffff;
189 }
190 info.si_signo = SIGSEGV;
191 info.si_errno = 0;
192 info.si_code = SEGV_MAPERR;
193 info.si_addr = (void __user *) addr;
194 info.si_trapno = 0;
195 force_sig_info(SIGSEGV, &info, current);
196}
197
198void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
199{
200 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
201 0, 0x8, SIGTRAP) == NOTIFY_STOP)
202 return;
203
204 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
205 sun4v_insn_access_exception(regs, addr, type_ctx);
206}
207
6c52a96e 208void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
1da177e4
LT
209{
210 siginfo_t info;
211
212 if (notify_die(DIE_TRAP, "data access exception", regs,
213 0, 0x30, SIGTRAP) == NOTIFY_STOP)
214 return;
215
216 if (regs->tstate & TSTATE_PRIV) {
217 /* Test if this comes from uaccess places. */
8cf14af0 218 const struct exception_table_entry *entry;
1da177e4 219
8cf14af0
DM
220 entry = search_exception_tables(regs->tpc);
221 if (entry) {
222 /* Ouch, somebody is trying VM hole tricks on us... */
1da177e4
LT
223#ifdef DEBUG_EXCEPTIONS
224 printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
8cf14af0
DM
225 printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
226 regs->tpc, entry->fixup);
1da177e4 227#endif
8cf14af0 228 regs->tpc = entry->fixup;
1da177e4 229 regs->tnpc = regs->tpc + 4;
1da177e4
LT
230 return;
231 }
232 /* Shit... */
6c52a96e
DM
233 printk("spitfire_data_access_exception: SFSR[%016lx] "
234 "SFAR[%016lx], going.\n", sfsr, sfar);
1da177e4
LT
235 die_if_kernel("Dax", regs);
236 }
237
238 info.si_signo = SIGSEGV;
239 info.si_errno = 0;
240 info.si_code = SEGV_MAPERR;
241 info.si_addr = (void __user *)sfar;
242 info.si_trapno = 0;
243 force_sig_info(SIGSEGV, &info, current);
244}
245
6c52a96e 246void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
bde4e4ee
DM
247{
248 if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
249 0, 0x30, SIGTRAP) == NOTIFY_STOP)
250 return;
251
252 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
6c52a96e 253 spitfire_data_access_exception(regs, sfsr, sfar);
bde4e4ee
DM
254}
255
ed6b0b45
DM
256void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
257{
258 unsigned short type = (type_ctx >> 16);
259 unsigned short ctx = (type_ctx & 0xffff);
260 siginfo_t info;
261
262 if (notify_die(DIE_TRAP, "data access exception", regs,
263 0, 0x8, SIGTRAP) == NOTIFY_STOP)
264 return;
265
266 if (regs->tstate & TSTATE_PRIV) {
267 printk("sun4v_data_access_exception: ADDR[%016lx] "
268 "CTX[%04x] TYPE[%04x], going.\n",
269 addr, ctx, type);
55555633 270 die_if_kernel("Dax", regs);
ed6b0b45
DM
271 }
272
273 if (test_thread_flag(TIF_32BIT)) {
274 regs->tpc &= 0xffffffff;
275 regs->tnpc &= 0xffffffff;
276 }
277 info.si_signo = SIGSEGV;
278 info.si_errno = 0;
279 info.si_code = SEGV_MAPERR;
280 info.si_addr = (void __user *) addr;
281 info.si_trapno = 0;
282 force_sig_info(SIGSEGV, &info, current);
283}
284
285void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
286{
287 if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
288 0, 0x8, SIGTRAP) == NOTIFY_STOP)
289 return;
290
291 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
292 sun4v_data_access_exception(regs, addr, type_ctx);
293}
294
1da177e4
LT
295#ifdef CONFIG_PCI
296/* This is really pathetic... */
297extern volatile int pci_poke_in_progress;
298extern volatile int pci_poke_cpu;
299extern volatile int pci_poke_faulted;
300#endif
301
302/* When access exceptions happen, we must do this. */
303static void spitfire_clean_and_reenable_l1_caches(void)
304{
305 unsigned long va;
306
307 if (tlb_type != spitfire)
308 BUG();
309
310 /* Clean 'em. */
311 for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
312 spitfire_put_icache_tag(va, 0x0);
313 spitfire_put_dcache_tag(va, 0x0);
314 }
315
316 /* Re-enable in LSU. */
317 __asm__ __volatile__("flush %%g6\n\t"
318 "membar #Sync\n\t"
319 "stxa %0, [%%g0] %1\n\t"
320 "membar #Sync"
321 : /* no outputs */
322 : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
323 LSU_CONTROL_IM | LSU_CONTROL_DM),
324 "i" (ASI_LSU_CONTROL)
325 : "memory");
326}
327
6c52a96e 328static void spitfire_enable_estate_errors(void)
1da177e4 329{
6c52a96e
DM
330 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
331 "membar #Sync"
332 : /* no outputs */
333 : "r" (ESTATE_ERR_ALL),
334 "i" (ASI_ESTATE_ERROR_EN));
1da177e4
LT
335}
336
337static char ecc_syndrome_table[] = {
338 0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
339 0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
340 0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
341 0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
342 0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
343 0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
344 0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
345 0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
346 0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
347 0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
348 0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
349 0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
350 0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
351 0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
352 0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
353 0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
354 0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
355 0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
356 0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
357 0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
358 0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
359 0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
360 0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
361 0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
362 0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
363 0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
364 0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
365 0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
366 0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
367 0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
368 0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
369 0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
370};
371
1da177e4
LT
372static char *syndrome_unknown = "<Unknown>";
373
6c52a96e 374static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
1da177e4 375{
6c52a96e
DM
376 unsigned short scode;
377 char memmod_str[64], *p;
1da177e4 378
6c52a96e
DM
379 if (udbl & bit) {
380 scode = ecc_syndrome_table[udbl & 0xff];
1da177e4
LT
381 if (prom_getunumber(scode, afar,
382 memmod_str, sizeof(memmod_str)) == -1)
383 p = syndrome_unknown;
384 else
385 p = memmod_str;
386 printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
387 "Memory Module \"%s\"\n",
388 smp_processor_id(), scode, p);
389 }
390
6c52a96e
DM
391 if (udbh & bit) {
392 scode = ecc_syndrome_table[udbh & 0xff];
1da177e4
LT
393 if (prom_getunumber(scode, afar,
394 memmod_str, sizeof(memmod_str)) == -1)
395 p = syndrome_unknown;
396 else
397 p = memmod_str;
398 printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
399 "Memory Module \"%s\"\n",
400 smp_processor_id(), scode, p);
401 }
6c52a96e
DM
402
403}
404
405static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
406{
407
408 printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
409 "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
410 smp_processor_id(), afsr, afar, udbl, udbh, tl1);
411
412 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);
413
414 /* We always log it, even if someone is listening for this
415 * trap.
416 */
417 notify_die(DIE_TRAP, "Correctable ECC Error", regs,
418 0, TRAP_TYPE_CEE, SIGTRAP);
419
420 /* The Correctable ECC Error trap does not disable I/D caches. So
421 * we only have to restore the ESTATE Error Enable register.
422 */
423 spitfire_enable_estate_errors();
424}
425
426static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
427{
428 siginfo_t info;
429
430 printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
431 "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n",
432 smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
433
434 /* XXX add more human friendly logging of the error status
435 * XXX as is implemented for cheetah
436 */
437
438 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
439
440 /* We always log it, even if someone is listening for this
441 * trap.
442 */
443 notify_die(DIE_TRAP, "Uncorrectable Error", regs,
444 0, tt, SIGTRAP);
445
446 if (regs->tstate & TSTATE_PRIV) {
447 if (tl1)
448 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
449 die_if_kernel("UE", regs);
450 }
451
452 /* XXX need more intelligent processing here, such as is implemented
453 * XXX for cheetah errors, in fact if the E-cache still holds the
454 * XXX line with bad parity this will loop
455 */
456
457 spitfire_clean_and_reenable_l1_caches();
458 spitfire_enable_estate_errors();
459
460 if (test_thread_flag(TIF_32BIT)) {
461 regs->tpc &= 0xffffffff;
462 regs->tnpc &= 0xffffffff;
463 }
464 info.si_signo = SIGBUS;
465 info.si_errno = 0;
466 info.si_code = BUS_OBJERR;
467 info.si_addr = (void *)0;
468 info.si_trapno = 0;
469 force_sig_info(SIGBUS, &info, current);
470}
471
472void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
473{
474 unsigned long afsr, tt, udbh, udbl;
475 int tl1;
476
477 afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
478 tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
479 tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
480 udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
481 udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;
482
483#ifdef CONFIG_PCI
484 if (tt == TRAP_TYPE_DAE &&
485 pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
486 spitfire_clean_and_reenable_l1_caches();
487 spitfire_enable_estate_errors();
488
489 pci_poke_faulted = 1;
490 regs->tnpc = regs->tpc + 4;
491 return;
492 }
493#endif
494
495 if (afsr & SFAFSR_UE)
496 spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);
497
498 if (tt == TRAP_TYPE_CEE) {
499 /* Handle the case where we took a CEE trap, but ACK'd
500 * only the UE state in the UDB error registers.
501 */
502 if (afsr & SFAFSR_UE) {
503 if (udbh & UDBE_CE) {
504 __asm__ __volatile__(
505 "stxa %0, [%1] %2\n\t"
506 "membar #Sync"
507 : /* no outputs */
508 : "r" (udbh & UDBE_CE),
509 "r" (0x0), "i" (ASI_UDB_ERROR_W));
510 }
511 if (udbl & UDBE_CE) {
512 __asm__ __volatile__(
513 "stxa %0, [%1] %2\n\t"
514 "membar #Sync"
515 : /* no outputs */
516 : "r" (udbl & UDBE_CE),
517 "r" (0x18), "i" (ASI_UDB_ERROR_W));
518 }
519 }
520
521 spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
522 }
1da177e4
LT
523}
524
816242da
DM
525int cheetah_pcache_forced_on;
526
527void cheetah_enable_pcache(void)
528{
529 unsigned long dcr;
530
531 printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
532 smp_processor_id());
533
534 __asm__ __volatile__("ldxa [%%g0] %1, %0"
535 : "=r" (dcr)
536 : "i" (ASI_DCU_CONTROL_REG));
537 dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
538 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
539 "membar #Sync"
540 : /* no outputs */
541 : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
542}
543
1da177e4
LT
544/* Cheetah error trap handling. */
545static unsigned long ecache_flush_physbase;
546static unsigned long ecache_flush_linesize;
547static unsigned long ecache_flush_size;
548
1da177e4
LT
549/* This table is ordered in priority of errors and matches the
550 * AFAR overwrite policy as well.
551 */
552
553struct afsr_error_table {
554 unsigned long mask;
555 const char *name;
556};
557
558static const char CHAFSR_PERR_msg[] =
559 "System interface protocol error";
560static const char CHAFSR_IERR_msg[] =
561 "Internal processor error";
562static const char CHAFSR_ISAP_msg[] =
563 "System request parity error on incoming addresss";
564static const char CHAFSR_UCU_msg[] =
565 "Uncorrectable E-cache ECC error for ifetch/data";
566static const char CHAFSR_UCC_msg[] =
567 "SW Correctable E-cache ECC error for ifetch/data";
568static const char CHAFSR_UE_msg[] =
569 "Uncorrectable system bus data ECC error for read";
570static const char CHAFSR_EDU_msg[] =
571 "Uncorrectable E-cache ECC error for stmerge/blkld";
572static const char CHAFSR_EMU_msg[] =
573 "Uncorrectable system bus MTAG error";
574static const char CHAFSR_WDU_msg[] =
575 "Uncorrectable E-cache ECC error for writeback";
576static const char CHAFSR_CPU_msg[] =
577 "Uncorrectable ECC error for copyout";
578static const char CHAFSR_CE_msg[] =
579 "HW corrected system bus data ECC error for read";
580static const char CHAFSR_EDC_msg[] =
581 "HW corrected E-cache ECC error for stmerge/blkld";
582static const char CHAFSR_EMC_msg[] =
583 "HW corrected system bus MTAG ECC error";
584static const char CHAFSR_WDC_msg[] =
585 "HW corrected E-cache ECC error for writeback";
586static const char CHAFSR_CPC_msg[] =
587 "HW corrected ECC error for copyout";
588static const char CHAFSR_TO_msg[] =
589 "Unmapped error from system bus";
590static const char CHAFSR_BERR_msg[] =
591 "Bus error response from system bus";
592static const char CHAFSR_IVC_msg[] =
593 "HW corrected system bus data ECC error for ivec read";
594static const char CHAFSR_IVU_msg[] =
595 "Uncorrectable system bus data ECC error for ivec read";
596static struct afsr_error_table __cheetah_error_table[] = {
597 { CHAFSR_PERR, CHAFSR_PERR_msg },
598 { CHAFSR_IERR, CHAFSR_IERR_msg },
599 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
600 { CHAFSR_UCU, CHAFSR_UCU_msg },
601 { CHAFSR_UCC, CHAFSR_UCC_msg },
602 { CHAFSR_UE, CHAFSR_UE_msg },
603 { CHAFSR_EDU, CHAFSR_EDU_msg },
604 { CHAFSR_EMU, CHAFSR_EMU_msg },
605 { CHAFSR_WDU, CHAFSR_WDU_msg },
606 { CHAFSR_CPU, CHAFSR_CPU_msg },
607 { CHAFSR_CE, CHAFSR_CE_msg },
608 { CHAFSR_EDC, CHAFSR_EDC_msg },
609 { CHAFSR_EMC, CHAFSR_EMC_msg },
610 { CHAFSR_WDC, CHAFSR_WDC_msg },
611 { CHAFSR_CPC, CHAFSR_CPC_msg },
612 { CHAFSR_TO, CHAFSR_TO_msg },
613 { CHAFSR_BERR, CHAFSR_BERR_msg },
614 /* These two do not update the AFAR. */
615 { CHAFSR_IVC, CHAFSR_IVC_msg },
616 { CHAFSR_IVU, CHAFSR_IVU_msg },
617 { 0, NULL },
618};
619static const char CHPAFSR_DTO_msg[] =
620 "System bus unmapped error for prefetch/storequeue-read";
621static const char CHPAFSR_DBERR_msg[] =
622 "System bus error for prefetch/storequeue-read";
623static const char CHPAFSR_THCE_msg[] =
624 "Hardware corrected E-cache Tag ECC error";
625static const char CHPAFSR_TSCE_msg[] =
626 "SW handled correctable E-cache Tag ECC error";
627static const char CHPAFSR_TUE_msg[] =
628 "Uncorrectable E-cache Tag ECC error";
629static const char CHPAFSR_DUE_msg[] =
630 "System bus uncorrectable data ECC error due to prefetch/store-fill";
631static struct afsr_error_table __cheetah_plus_error_table[] = {
632 { CHAFSR_PERR, CHAFSR_PERR_msg },
633 { CHAFSR_IERR, CHAFSR_IERR_msg },
634 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
635 { CHAFSR_UCU, CHAFSR_UCU_msg },
636 { CHAFSR_UCC, CHAFSR_UCC_msg },
637 { CHAFSR_UE, CHAFSR_UE_msg },
638 { CHAFSR_EDU, CHAFSR_EDU_msg },
639 { CHAFSR_EMU, CHAFSR_EMU_msg },
640 { CHAFSR_WDU, CHAFSR_WDU_msg },
641 { CHAFSR_CPU, CHAFSR_CPU_msg },
642 { CHAFSR_CE, CHAFSR_CE_msg },
643 { CHAFSR_EDC, CHAFSR_EDC_msg },
644 { CHAFSR_EMC, CHAFSR_EMC_msg },
645 { CHAFSR_WDC, CHAFSR_WDC_msg },
646 { CHAFSR_CPC, CHAFSR_CPC_msg },
647 { CHAFSR_TO, CHAFSR_TO_msg },
648 { CHAFSR_BERR, CHAFSR_BERR_msg },
649 { CHPAFSR_DTO, CHPAFSR_DTO_msg },
650 { CHPAFSR_DBERR, CHPAFSR_DBERR_msg },
651 { CHPAFSR_THCE, CHPAFSR_THCE_msg },
652 { CHPAFSR_TSCE, CHPAFSR_TSCE_msg },
653 { CHPAFSR_TUE, CHPAFSR_TUE_msg },
654 { CHPAFSR_DUE, CHPAFSR_DUE_msg },
655 /* These two do not update the AFAR. */
656 { CHAFSR_IVC, CHAFSR_IVC_msg },
657 { CHAFSR_IVU, CHAFSR_IVU_msg },
658 { 0, NULL },
659};
660static const char JPAFSR_JETO_msg[] =
661 "System interface protocol error, hw timeout caused";
662static const char JPAFSR_SCE_msg[] =
663 "Parity error on system snoop results";
664static const char JPAFSR_JEIC_msg[] =
665 "System interface protocol error, illegal command detected";
666static const char JPAFSR_JEIT_msg[] =
667 "System interface protocol error, illegal ADTYPE detected";
668static const char JPAFSR_OM_msg[] =
669 "Out of range memory error has occurred";
670static const char JPAFSR_ETP_msg[] =
671 "Parity error on L2 cache tag SRAM";
672static const char JPAFSR_UMS_msg[] =
673 "Error due to unsupported store";
674static const char JPAFSR_RUE_msg[] =
675 "Uncorrectable ECC error from remote cache/memory";
676static const char JPAFSR_RCE_msg[] =
677 "Correctable ECC error from remote cache/memory";
678static const char JPAFSR_BP_msg[] =
679 "JBUS parity error on returned read data";
680static const char JPAFSR_WBP_msg[] =
681 "JBUS parity error on data for writeback or block store";
682static const char JPAFSR_FRC_msg[] =
683 "Foreign read to DRAM incurring correctable ECC error";
684static const char JPAFSR_FRU_msg[] =
685 "Foreign read to DRAM incurring uncorrectable ECC error";
686static struct afsr_error_table __jalapeno_error_table[] = {
687 { JPAFSR_JETO, JPAFSR_JETO_msg },
688 { JPAFSR_SCE, JPAFSR_SCE_msg },
689 { JPAFSR_JEIC, JPAFSR_JEIC_msg },
690 { JPAFSR_JEIT, JPAFSR_JEIT_msg },
691 { CHAFSR_PERR, CHAFSR_PERR_msg },
692 { CHAFSR_IERR, CHAFSR_IERR_msg },
693 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
694 { CHAFSR_UCU, CHAFSR_UCU_msg },
695 { CHAFSR_UCC, CHAFSR_UCC_msg },
696 { CHAFSR_UE, CHAFSR_UE_msg },
697 { CHAFSR_EDU, CHAFSR_EDU_msg },
698 { JPAFSR_OM, JPAFSR_OM_msg },
699 { CHAFSR_WDU, CHAFSR_WDU_msg },
700 { CHAFSR_CPU, CHAFSR_CPU_msg },
701 { CHAFSR_CE, CHAFSR_CE_msg },
702 { CHAFSR_EDC, CHAFSR_EDC_msg },
703 { JPAFSR_ETP, JPAFSR_ETP_msg },
704 { CHAFSR_WDC, CHAFSR_WDC_msg },
705 { CHAFSR_CPC, CHAFSR_CPC_msg },
706 { CHAFSR_TO, CHAFSR_TO_msg },
707 { CHAFSR_BERR, CHAFSR_BERR_msg },
708 { JPAFSR_UMS, JPAFSR_UMS_msg },
709 { JPAFSR_RUE, JPAFSR_RUE_msg },
710 { JPAFSR_RCE, JPAFSR_RCE_msg },
711 { JPAFSR_BP, JPAFSR_BP_msg },
712 { JPAFSR_WBP, JPAFSR_WBP_msg },
713 { JPAFSR_FRC, JPAFSR_FRC_msg },
714 { JPAFSR_FRU, JPAFSR_FRU_msg },
715 /* These two do not update the AFAR. */
716 { CHAFSR_IVU, CHAFSR_IVU_msg },
717 { 0, NULL },
718};
719static struct afsr_error_table *cheetah_error_table;
720static unsigned long cheetah_afsr_errors;
721
1da177e4
LT
722struct cheetah_err_info *cheetah_error_log;
723
d979f179 724static inline struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
1da177e4
LT
725{
726 struct cheetah_err_info *p;
727 int cpu = smp_processor_id();
728
729 if (!cheetah_error_log)
730 return NULL;
731
732 p = cheetah_error_log + (cpu * 2);
733 if ((afsr & CHAFSR_TL1) != 0UL)
734 p++;
735
736 return p;
737}
738
739extern unsigned int tl0_icpe[], tl1_icpe[];
740extern unsigned int tl0_dcpe[], tl1_dcpe[];
741extern unsigned int tl0_fecc[], tl1_fecc[];
742extern unsigned int tl0_cee[], tl1_cee[];
743extern unsigned int tl0_iae[], tl1_iae[];
744extern unsigned int tl0_dae[], tl1_dae[];
745extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
746extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
747extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
748extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
749extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
750
751void __init cheetah_ecache_flush_init(void)
752{
753 unsigned long largest_size, smallest_linesize, order, ver;
5cbc3073 754 int i, sz;
1da177e4
LT
755
756 /* Scan all cpu device tree nodes, note two values:
757 * 1) largest E-cache size
758 * 2) smallest E-cache line size
759 */
760 largest_size = 0UL;
761 smallest_linesize = ~0UL;
762
5cbc3073 763 for (i = 0; i < NR_CPUS; i++) {
1da177e4
LT
764 unsigned long val;
765
5cbc3073
DM
766 val = cpu_data(i).ecache_size;
767 if (!val)
768 continue;
769
1da177e4
LT
770 if (val > largest_size)
771 largest_size = val;
5cbc3073
DM
772
773 val = cpu_data(i).ecache_line_size;
1da177e4
LT
774 if (val < smallest_linesize)
775 smallest_linesize = val;
5cbc3073 776
1da177e4
LT
777 }
778
779 if (largest_size == 0UL || smallest_linesize == ~0UL) {
780 prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
781 "parameters.\n");
782 prom_halt();
783 }
784
785 ecache_flush_size = (2 * largest_size);
786 ecache_flush_linesize = smallest_linesize;
787
10147570 788 ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);
1da177e4 789
10147570 790 if (ecache_flush_physbase == ~0UL) {
1da177e4 791 prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
10147570
DM
792 "contiguous physical memory.\n",
793 ecache_flush_size);
1da177e4
LT
794 prom_halt();
795 }
796
797 /* Now allocate error trap reporting scoreboard. */
07f8e5f3 798 sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
1da177e4 799 for (order = 0; order < MAX_ORDER; order++) {
07f8e5f3 800 if ((PAGE_SIZE << order) >= sz)
1da177e4
LT
801 break;
802 }
803 cheetah_error_log = (struct cheetah_err_info *)
804 __get_free_pages(GFP_KERNEL, order);
805 if (!cheetah_error_log) {
806 prom_printf("cheetah_ecache_flush_init: Failed to allocate "
07f8e5f3 807 "error logging scoreboard (%d bytes).\n", sz);
1da177e4
LT
808 prom_halt();
809 }
810 memset(cheetah_error_log, 0, PAGE_SIZE << order);
811
812 /* Mark all AFSRs as invalid so that the trap handler will
813 * log new new information there.
814 */
815 for (i = 0; i < 2 * NR_CPUS; i++)
816 cheetah_error_log[i].afsr = CHAFSR_INVALID;
817
818 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
92704a1c
DM
819 if ((ver >> 32) == __JALAPENO_ID ||
820 (ver >> 32) == __SERRANO_ID) {
1da177e4
LT
821 cheetah_error_table = &__jalapeno_error_table[0];
822 cheetah_afsr_errors = JPAFSR_ERRORS;
823 } else if ((ver >> 32) == 0x003e0015) {
824 cheetah_error_table = &__cheetah_plus_error_table[0];
825 cheetah_afsr_errors = CHPAFSR_ERRORS;
826 } else {
827 cheetah_error_table = &__cheetah_error_table[0];
828 cheetah_afsr_errors = CHAFSR_ERRORS;
829 }
830
831 /* Now patch trap tables. */
832 memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
833 memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
834 memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
835 memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
836 memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
837 memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
838 memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
839 memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
840 if (tlb_type == cheetah_plus) {
841 memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
842 memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
843 memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
844 memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
845 }
846 flushi(PAGE_OFFSET);
847}
848
849static void cheetah_flush_ecache(void)
850{
851 unsigned long flush_base = ecache_flush_physbase;
852 unsigned long flush_linesize = ecache_flush_linesize;
853 unsigned long flush_size = ecache_flush_size;
854
855 __asm__ __volatile__("1: subcc %0, %4, %0\n\t"
856 " bne,pt %%xcc, 1b\n\t"
857 " ldxa [%2 + %0] %3, %%g0\n\t"
858 : "=&r" (flush_size)
859 : "0" (flush_size), "r" (flush_base),
860 "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
861}
862
863static void cheetah_flush_ecache_line(unsigned long physaddr)
864{
865 unsigned long alias;
866
867 physaddr &= ~(8UL - 1UL);
868 physaddr = (ecache_flush_physbase +
869 (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
870 alias = physaddr + (ecache_flush_size >> 1UL);
871 __asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
872 "ldxa [%1] %2, %%g0\n\t"
873 "membar #Sync"
874 : /* no outputs */
875 : "r" (physaddr), "r" (alias),
876 "i" (ASI_PHYS_USE_EC));
877}
878
879/* Unfortunately, the diagnostic access to the I-cache tags we need to
880 * use to clear the thing interferes with I-cache coherency transactions.
881 *
882 * So we must only flush the I-cache when it is disabled.
883 */
884static void __cheetah_flush_icache(void)
885{
80dc0d6b
DM
886 unsigned int icache_size, icache_line_size;
887 unsigned long addr;
888
889 icache_size = local_cpu_data().icache_size;
890 icache_line_size = local_cpu_data().icache_line_size;
1da177e4
LT
891
892 /* Clear the valid bits in all the tags. */
80dc0d6b 893 for (addr = 0; addr < icache_size; addr += icache_line_size) {
1da177e4
LT
894 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
895 "membar #Sync"
896 : /* no outputs */
80dc0d6b
DM
897 : "r" (addr | (2 << 3)),
898 "i" (ASI_IC_TAG));
1da177e4
LT
899 }
900}
901
902static void cheetah_flush_icache(void)
903{
904 unsigned long dcu_save;
905
906 /* Save current DCU, disable I-cache. */
907 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
908 "or %0, %2, %%g1\n\t"
909 "stxa %%g1, [%%g0] %1\n\t"
910 "membar #Sync"
911 : "=r" (dcu_save)
912 : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
913 : "g1");
914
915 __cheetah_flush_icache();
916
917 /* Restore DCU register */
918 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
919 "membar #Sync"
920 : /* no outputs */
921 : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
922}
923
924static void cheetah_flush_dcache(void)
925{
80dc0d6b
DM
926 unsigned int dcache_size, dcache_line_size;
927 unsigned long addr;
928
929 dcache_size = local_cpu_data().dcache_size;
930 dcache_line_size = local_cpu_data().dcache_line_size;
1da177e4 931
80dc0d6b 932 for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
1da177e4
LT
933 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
934 "membar #Sync"
935 : /* no outputs */
80dc0d6b 936 : "r" (addr), "i" (ASI_DCACHE_TAG));
1da177e4
LT
937 }
938}
939
940/* In order to make the even parity correct we must do two things.
941 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
942 * Next, we clear out all 32-bytes of data for that line. Data of
943 * all-zero + tag parity value of zero == correct parity.
944 */
945static void cheetah_plus_zap_dcache_parity(void)
946{
80dc0d6b
DM
947 unsigned int dcache_size, dcache_line_size;
948 unsigned long addr;
949
950 dcache_size = local_cpu_data().dcache_size;
951 dcache_line_size = local_cpu_data().dcache_line_size;
1da177e4 952
80dc0d6b
DM
953 for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
954 unsigned long tag = (addr >> 14);
955 unsigned long line;
1da177e4
LT
956
957 __asm__ __volatile__("membar #Sync\n\t"
958 "stxa %0, [%1] %2\n\t"
959 "membar #Sync"
960 : /* no outputs */
80dc0d6b 961 : "r" (tag), "r" (addr),
1da177e4 962 "i" (ASI_DCACHE_UTAG));
80dc0d6b 963 for (line = addr; line < addr + dcache_line_size; line += 8)
1da177e4
LT
964 __asm__ __volatile__("membar #Sync\n\t"
965 "stxa %%g0, [%0] %1\n\t"
966 "membar #Sync"
967 : /* no outputs */
80dc0d6b
DM
968 : "r" (line),
969 "i" (ASI_DCACHE_DATA));
1da177e4
LT
970 }
971}
972
973/* Conversion tables used to frob Cheetah AFSR syndrome values into
974 * something palatable to the memory controller driver get_unumber
975 * routine.
976 */
977#define MT0 137
978#define MT1 138
979#define MT2 139
980#define NONE 254
981#define MTC0 140
982#define MTC1 141
983#define MTC2 142
984#define MTC3 143
985#define C0 128
986#define C1 129
987#define C2 130
988#define C3 131
989#define C4 132
990#define C5 133
991#define C6 134
992#define C7 135
993#define C8 136
994#define M2 144
995#define M3 145
996#define M4 146
997#define M 147
998static unsigned char cheetah_ecc_syntab[] = {
999/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
1000/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
1001/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
1002/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
1003/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
1004/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
1005/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
1006/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
1007/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
1008/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
1009/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
1010/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
1011/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
1012/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
1013/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
1014/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
1015/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
1016/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
1017/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
1018/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
1019/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
1020/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
1021/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
1022/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
1023/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
1024/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
1025/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
1026/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
1027/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
1028/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
1029/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
1030/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
1031};
1032static unsigned char cheetah_mtag_syntab[] = {
1033 NONE, MTC0,
1034 MTC1, NONE,
1035 MTC2, NONE,
1036 NONE, MT0,
1037 MTC3, NONE,
1038 NONE, MT1,
1039 NONE, MT2,
1040 NONE, NONE
1041};
1042
1043/* Return the highest priority error conditon mentioned. */
d979f179 1044static inline unsigned long cheetah_get_hipri(unsigned long afsr)
1da177e4
LT
1045{
1046 unsigned long tmp = 0;
1047 int i;
1048
1049 for (i = 0; cheetah_error_table[i].mask; i++) {
1050 if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
1051 return tmp;
1052 }
1053 return tmp;
1054}
1055
1056static const char *cheetah_get_string(unsigned long bit)
1057{
1058 int i;
1059
1060 for (i = 0; cheetah_error_table[i].mask; i++) {
1061 if ((bit & cheetah_error_table[i].mask) != 0UL)
1062 return cheetah_error_table[i].name;
1063 }
1064 return "???";
1065}
1066
1067extern int chmc_getunumber(int, unsigned long, char *, int);
1068
1069static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
1070 unsigned long afsr, unsigned long afar, int recoverable)
1071{
1072 unsigned long hipri;
1073 char unum[256];
1074
1075 printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
1076 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1077 afsr, afar,
1078 (afsr & CHAFSR_TL1) ? 1 : 0);
955c054f 1079 printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
1da177e4 1080 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
955c054f 1081 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
5af47db7
DM
1082 printk("%s" "ERROR(%d): ",
1083 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
1084 print_symbol("TPC<%s>\n", regs->tpc);
1da177e4
LT
1085 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
1086 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1087 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
1088 (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
1089 (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
1090 (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
1091 hipri = cheetah_get_hipri(afsr);
1092 printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
1093 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1094 hipri, cheetah_get_string(hipri));
1095
1096 /* Try to get unumber if relevant. */
1097#define ESYND_ERRORS (CHAFSR_IVC | CHAFSR_IVU | \
1098 CHAFSR_CPC | CHAFSR_CPU | \
1099 CHAFSR_UE | CHAFSR_CE | \
1100 CHAFSR_EDC | CHAFSR_EDU | \
1101 CHAFSR_UCC | CHAFSR_UCU | \
1102 CHAFSR_WDU | CHAFSR_WDC)
1103#define MSYND_ERRORS (CHAFSR_EMC | CHAFSR_EMU)
1104 if (afsr & ESYND_ERRORS) {
1105 int syndrome;
1106 int ret;
1107
1108 syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
1109 syndrome = cheetah_ecc_syntab[syndrome];
1110 ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
1111 if (ret != -1)
1112 printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
1113 (recoverable ? KERN_WARNING : KERN_CRIT),
1114 smp_processor_id(), unum);
1115 } else if (afsr & MSYND_ERRORS) {
1116 int syndrome;
1117 int ret;
1118
1119 syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
1120 syndrome = cheetah_mtag_syntab[syndrome];
1121 ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
1122 if (ret != -1)
1123 printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
1124 (recoverable ? KERN_WARNING : KERN_CRIT),
1125 smp_processor_id(), unum);
1126 }
1127
1128 /* Now dump the cache snapshots. */
1129 printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
1130 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1131 (int) info->dcache_index,
1132 info->dcache_tag,
1133 info->dcache_utag,
1134 info->dcache_stag);
1135 printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
1136 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1137 info->dcache_data[0],
1138 info->dcache_data[1],
1139 info->dcache_data[2],
1140 info->dcache_data[3]);
1141 printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
1142 "u[%016lx] l[%016lx]\n",
1143 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1144 (int) info->icache_index,
1145 info->icache_tag,
1146 info->icache_utag,
1147 info->icache_stag,
1148 info->icache_upper,
1149 info->icache_lower);
1150 printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
1151 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1152 info->icache_data[0],
1153 info->icache_data[1],
1154 info->icache_data[2],
1155 info->icache_data[3]);
1156 printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
1157 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1158 info->icache_data[4],
1159 info->icache_data[5],
1160 info->icache_data[6],
1161 info->icache_data[7]);
1162 printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
1163 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1164 (int) info->ecache_index, info->ecache_tag);
1165 printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
1166 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1167 info->ecache_data[0],
1168 info->ecache_data[1],
1169 info->ecache_data[2],
1170 info->ecache_data[3]);
1171
1172 afsr = (afsr & ~hipri) & cheetah_afsr_errors;
1173 while (afsr != 0UL) {
1174 unsigned long bit = cheetah_get_hipri(afsr);
1175
1176 printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
1177 (recoverable ? KERN_WARNING : KERN_CRIT),
1178 bit, cheetah_get_string(bit));
1179
1180 afsr &= ~bit;
1181 }
1182
1183 if (!recoverable)
1184 printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
1185}
1186
1187static int cheetah_recheck_errors(struct cheetah_err_info *logp)
1188{
1189 unsigned long afsr, afar;
1190 int ret = 0;
1191
1192 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
1193 : "=r" (afsr)
1194 : "i" (ASI_AFSR));
1195 if ((afsr & cheetah_afsr_errors) != 0) {
1196 if (logp != NULL) {
1197 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
1198 : "=r" (afar)
1199 : "i" (ASI_AFAR));
1200 logp->afsr = afsr;
1201 logp->afar = afar;
1202 }
1203 ret = 1;
1204 }
1205 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
1206 "membar #Sync\n\t"
1207 : : "r" (afsr), "i" (ASI_AFSR));
1208
1209 return ret;
1210}
1211
1212void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1213{
1214 struct cheetah_err_info local_snapshot, *p;
1215 int recoverable;
1216
1217 /* Flush E-cache */
1218 cheetah_flush_ecache();
1219
1220 p = cheetah_get_error_log(afsr);
1221 if (!p) {
1222 prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
1223 afsr, afar);
1224 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1225 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1226 prom_halt();
1227 }
1228
1229 /* Grab snapshot of logged error. */
1230 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1231
1232 /* If the current trap snapshot does not match what the
1233 * trap handler passed along into our args, big trouble.
1234 * In such a case, mark the local copy as invalid.
1235 *
1236 * Else, it matches and we mark the afsr in the non-local
1237 * copy as invalid so we may log new error traps there.
1238 */
1239 if (p->afsr != afsr || p->afar != afar)
1240 local_snapshot.afsr = CHAFSR_INVALID;
1241 else
1242 p->afsr = CHAFSR_INVALID;
1243
1244 cheetah_flush_icache();
1245 cheetah_flush_dcache();
1246
1247 /* Re-enable I-cache/D-cache */
1248 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1249 "or %%g1, %1, %%g1\n\t"
1250 "stxa %%g1, [%%g0] %0\n\t"
1251 "membar #Sync"
1252 : /* no outputs */
1253 : "i" (ASI_DCU_CONTROL_REG),
1254 "i" (DCU_DC | DCU_IC)
1255 : "g1");
1256
1257 /* Re-enable error reporting */
1258 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1259 "or %%g1, %1, %%g1\n\t"
1260 "stxa %%g1, [%%g0] %0\n\t"
1261 "membar #Sync"
1262 : /* no outputs */
1263 : "i" (ASI_ESTATE_ERROR_EN),
1264 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1265 : "g1");
1266
1267 /* Decide if we can continue after handling this trap and
1268 * logging the error.
1269 */
1270 recoverable = 1;
1271 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1272 recoverable = 0;
1273
1274 /* Re-check AFSR/AFAR. What we are looking for here is whether a new
1275 * error was logged while we had error reporting traps disabled.
1276 */
1277 if (cheetah_recheck_errors(&local_snapshot)) {
1278 unsigned long new_afsr = local_snapshot.afsr;
1279
1280 /* If we got a new asynchronous error, die... */
1281 if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1282 CHAFSR_WDU | CHAFSR_CPU |
1283 CHAFSR_IVU | CHAFSR_UE |
1284 CHAFSR_BERR | CHAFSR_TO))
1285 recoverable = 0;
1286 }
1287
1288 /* Log errors. */
1289 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1290
1291 if (!recoverable)
1292 panic("Irrecoverable Fast-ECC error trap.\n");
1293
1294 /* Flush E-cache to kick the error trap handlers out. */
1295 cheetah_flush_ecache();
1296}
1297
1298/* Try to fix a correctable error by pushing the line out from
1299 * the E-cache. Recheck error reporting registers to see if the
1300 * problem is intermittent.
1301 */
1302static int cheetah_fix_ce(unsigned long physaddr)
1303{
1304 unsigned long orig_estate;
1305 unsigned long alias1, alias2;
1306 int ret;
1307
1308 /* Make sure correctable error traps are disabled. */
1309 __asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
1310 "andn %0, %1, %%g1\n\t"
1311 "stxa %%g1, [%%g0] %2\n\t"
1312 "membar #Sync"
1313 : "=&r" (orig_estate)
1314 : "i" (ESTATE_ERROR_CEEN),
1315 "i" (ASI_ESTATE_ERROR_EN)
1316 : "g1");
1317
1318 /* We calculate alias addresses that will force the
1319 * cache line in question out of the E-cache. Then
1320 * we bring it back in with an atomic instruction so
1321 * that we get it in some modified/exclusive state,
1322 * then we displace it again to try and get proper ECC
1323 * pushed back into the system.
1324 */
1325 physaddr &= ~(8UL - 1UL);
1326 alias1 = (ecache_flush_physbase +
1327 (physaddr & ((ecache_flush_size >> 1) - 1)));
1328 alias2 = alias1 + (ecache_flush_size >> 1);
1329 __asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
1330 "ldxa [%1] %3, %%g0\n\t"
1331 "casxa [%2] %3, %%g0, %%g0\n\t"
1332 "membar #StoreLoad | #StoreStore\n\t"
1333 "ldxa [%0] %3, %%g0\n\t"
1334 "ldxa [%1] %3, %%g0\n\t"
1335 "membar #Sync"
1336 : /* no outputs */
1337 : "r" (alias1), "r" (alias2),
1338 "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1339
1340 /* Did that trigger another error? */
1341 if (cheetah_recheck_errors(NULL)) {
1342 /* Try one more time. */
1343 __asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
1344 "membar #Sync"
1345 : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1346 if (cheetah_recheck_errors(NULL))
1347 ret = 2;
1348 else
1349 ret = 1;
1350 } else {
1351 /* No new error, intermittent problem. */
1352 ret = 0;
1353 }
1354
1355 /* Restore error enables. */
1356 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
1357 "membar #Sync"
1358 : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));
1359
1360 return ret;
1361}
1362
1363/* Return non-zero if PADDR is a valid physical memory address. */
1364static int cheetah_check_main_memory(unsigned long paddr)
1365{
10147570 1366 unsigned long vaddr = PAGE_OFFSET + paddr;
1da177e4 1367
13edad7a 1368 if (vaddr > (unsigned long) high_memory)
ed3ffaf7
DM
1369 return 0;
1370
10147570 1371 return kern_addr_valid(vaddr);
1da177e4
LT
1372}
1373
1374void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1375{
1376 struct cheetah_err_info local_snapshot, *p;
1377 int recoverable, is_memory;
1378
1379 p = cheetah_get_error_log(afsr);
1380 if (!p) {
1381 prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
1382 afsr, afar);
1383 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1384 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1385 prom_halt();
1386 }
1387
1388 /* Grab snapshot of logged error. */
1389 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1390
1391 /* If the current trap snapshot does not match what the
1392 * trap handler passed along into our args, big trouble.
1393 * In such a case, mark the local copy as invalid.
1394 *
1395 * Else, it matches and we mark the afsr in the non-local
1396 * copy as invalid so we may log new error traps there.
1397 */
1398 if (p->afsr != afsr || p->afar != afar)
1399 local_snapshot.afsr = CHAFSR_INVALID;
1400 else
1401 p->afsr = CHAFSR_INVALID;
1402
1403 is_memory = cheetah_check_main_memory(afar);
1404
1405 if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
1406 /* XXX Might want to log the results of this operation
1407 * XXX somewhere... -DaveM
1408 */
1409 cheetah_fix_ce(afar);
1410 }
1411
1412 {
1413 int flush_all, flush_line;
1414
1415 flush_all = flush_line = 0;
1416 if ((afsr & CHAFSR_EDC) != 0UL) {
1417 if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
1418 flush_line = 1;
1419 else
1420 flush_all = 1;
1421 } else if ((afsr & CHAFSR_CPC) != 0UL) {
1422 if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
1423 flush_line = 1;
1424 else
1425 flush_all = 1;
1426 }
1427
1428 /* Trap handler only disabled I-cache, flush it. */
1429 cheetah_flush_icache();
1430
1431 /* Re-enable I-cache */
1432 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1433 "or %%g1, %1, %%g1\n\t"
1434 "stxa %%g1, [%%g0] %0\n\t"
1435 "membar #Sync"
1436 : /* no outputs */
1437 : "i" (ASI_DCU_CONTROL_REG),
1438 "i" (DCU_IC)
1439 : "g1");
1440
1441 if (flush_all)
1442 cheetah_flush_ecache();
1443 else if (flush_line)
1444 cheetah_flush_ecache_line(afar);
1445 }
1446
1447 /* Re-enable error reporting */
1448 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1449 "or %%g1, %1, %%g1\n\t"
1450 "stxa %%g1, [%%g0] %0\n\t"
1451 "membar #Sync"
1452 : /* no outputs */
1453 : "i" (ASI_ESTATE_ERROR_EN),
1454 "i" (ESTATE_ERROR_CEEN)
1455 : "g1");
1456
1457 /* Decide if we can continue after handling this trap and
1458 * logging the error.
1459 */
1460 recoverable = 1;
1461 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1462 recoverable = 0;
1463
1464 /* Re-check AFSR/AFAR */
1465 (void) cheetah_recheck_errors(&local_snapshot);
1466
1467 /* Log errors. */
1468 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1469
1470 if (!recoverable)
1471 panic("Irrecoverable Correctable-ECC error trap.\n");
1472}
1473
1474void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1475{
1476 struct cheetah_err_info local_snapshot, *p;
1477 int recoverable, is_memory;
1478
1479#ifdef CONFIG_PCI
1480 /* Check for the special PCI poke sequence. */
1481 if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
1482 cheetah_flush_icache();
1483 cheetah_flush_dcache();
1484
1485 /* Re-enable I-cache/D-cache */
1486 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1487 "or %%g1, %1, %%g1\n\t"
1488 "stxa %%g1, [%%g0] %0\n\t"
1489 "membar #Sync"
1490 : /* no outputs */
1491 : "i" (ASI_DCU_CONTROL_REG),
1492 "i" (DCU_DC | DCU_IC)
1493 : "g1");
1494
1495 /* Re-enable error reporting */
1496 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1497 "or %%g1, %1, %%g1\n\t"
1498 "stxa %%g1, [%%g0] %0\n\t"
1499 "membar #Sync"
1500 : /* no outputs */
1501 : "i" (ASI_ESTATE_ERROR_EN),
1502 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1503 : "g1");
1504
1505 (void) cheetah_recheck_errors(NULL);
1506
1507 pci_poke_faulted = 1;
1508 regs->tpc += 4;
1509 regs->tnpc = regs->tpc + 4;
1510 return;
1511 }
1512#endif
1513
1514 p = cheetah_get_error_log(afsr);
1515 if (!p) {
1516 prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
1517 afsr, afar);
1518 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1519 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1520 prom_halt();
1521 }
1522
1523 /* Grab snapshot of logged error. */
1524 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1525
1526 /* If the current trap snapshot does not match what the
1527 * trap handler passed along into our args, big trouble.
1528 * In such a case, mark the local copy as invalid.
1529 *
1530 * Else, it matches and we mark the afsr in the non-local
1531 * copy as invalid so we may log new error traps there.
1532 */
1533 if (p->afsr != afsr || p->afar != afar)
1534 local_snapshot.afsr = CHAFSR_INVALID;
1535 else
1536 p->afsr = CHAFSR_INVALID;
1537
1538 is_memory = cheetah_check_main_memory(afar);
1539
1540 {
1541 int flush_all, flush_line;
1542
1543 flush_all = flush_line = 0;
1544 if ((afsr & CHAFSR_EDU) != 0UL) {
1545 if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
1546 flush_line = 1;
1547 else
1548 flush_all = 1;
1549 } else if ((afsr & CHAFSR_BERR) != 0UL) {
1550 if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
1551 flush_line = 1;
1552 else
1553 flush_all = 1;
1554 }
1555
1556 cheetah_flush_icache();
1557 cheetah_flush_dcache();
1558
1559 /* Re-enable I/D caches */
1560 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1561 "or %%g1, %1, %%g1\n\t"
1562 "stxa %%g1, [%%g0] %0\n\t"
1563 "membar #Sync"
1564 : /* no outputs */
1565 : "i" (ASI_DCU_CONTROL_REG),
1566 "i" (DCU_IC | DCU_DC)
1567 : "g1");
1568
1569 if (flush_all)
1570 cheetah_flush_ecache();
1571 else if (flush_line)
1572 cheetah_flush_ecache_line(afar);
1573 }
1574
1575 /* Re-enable error reporting */
1576 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1577 "or %%g1, %1, %%g1\n\t"
1578 "stxa %%g1, [%%g0] %0\n\t"
1579 "membar #Sync"
1580 : /* no outputs */
1581 : "i" (ASI_ESTATE_ERROR_EN),
1582 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1583 : "g1");
1584
1585 /* Decide if we can continue after handling this trap and
1586 * logging the error.
1587 */
1588 recoverable = 1;
1589 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1590 recoverable = 0;
1591
1592 /* Re-check AFSR/AFAR. What we are looking for here is whether a new
1593 * error was logged while we had error reporting traps disabled.
1594 */
1595 if (cheetah_recheck_errors(&local_snapshot)) {
1596 unsigned long new_afsr = local_snapshot.afsr;
1597
1598 /* If we got a new asynchronous error, die... */
1599 if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1600 CHAFSR_WDU | CHAFSR_CPU |
1601 CHAFSR_IVU | CHAFSR_UE |
1602 CHAFSR_BERR | CHAFSR_TO))
1603 recoverable = 0;
1604 }
1605
1606 /* Log errors. */
1607 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1608
1609 /* "Recoverable" here means we try to yank the page from ever
1610 * being newly used again. This depends upon a few things:
1611 * 1) Must be main memory, and AFAR must be valid.
1612 * 2) If we trapped from user, OK.
1613 * 3) Else, if we trapped from kernel we must find exception
1614 * table entry (ie. we have to have been accessing user
1615 * space).
1616 *
1617 * If AFAR is not in main memory, or we trapped from kernel
1618 * and cannot find an exception table entry, it is unacceptable
1619 * to try and continue.
1620 */
1621 if (recoverable && is_memory) {
1622 if ((regs->tstate & TSTATE_PRIV) == 0UL) {
1623 /* OK, usermode access. */
1624 recoverable = 1;
1625 } else {
8cf14af0 1626 const struct exception_table_entry *entry;
1da177e4 1627
8cf14af0
DM
1628 entry = search_exception_tables(regs->tpc);
1629 if (entry) {
1da177e4
LT
1630 /* OK, kernel access to userspace. */
1631 recoverable = 1;
1632
1633 } else {
1634 /* BAD, privileged state is corrupted. */
1635 recoverable = 0;
1636 }
1637
1638 if (recoverable) {
1639 if (pfn_valid(afar >> PAGE_SHIFT))
1640 get_page(pfn_to_page(afar >> PAGE_SHIFT));
1641 else
1642 recoverable = 0;
1643
1644 /* Only perform fixup if we still have a
1645 * recoverable condition.
1646 */
1647 if (recoverable) {
8cf14af0 1648 regs->tpc = entry->fixup;
1da177e4 1649 regs->tnpc = regs->tpc + 4;
1da177e4
LT
1650 }
1651 }
1652 }
1653 } else {
1654 recoverable = 0;
1655 }
1656
1657 if (!recoverable)
1658 panic("Irrecoverable deferred error trap.\n");
1659}
1660
1661/* Handle a D/I cache parity error trap. TYPE is encoded as:
1662 *
1663 * Bit0: 0=dcache,1=icache
1664 * Bit1: 0=recoverable,1=unrecoverable
1665 *
1666 * The hardware has disabled both the I-cache and D-cache in
1667 * the %dcr register.
1668 */
1669void cheetah_plus_parity_error(int type, struct pt_regs *regs)
1670{
1671 if (type & 0x1)
1672 __cheetah_flush_icache();
1673 else
1674 cheetah_plus_zap_dcache_parity();
1675 cheetah_flush_dcache();
1676
1677 /* Re-enable I-cache/D-cache */
1678 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1679 "or %%g1, %1, %%g1\n\t"
1680 "stxa %%g1, [%%g0] %0\n\t"
1681 "membar #Sync"
1682 : /* no outputs */
1683 : "i" (ASI_DCU_CONTROL_REG),
1684 "i" (DCU_DC | DCU_IC)
1685 : "g1");
1686
1687 if (type & 0x2) {
1688 printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1689 smp_processor_id(),
1690 (type & 0x1) ? 'I' : 'D',
1691 regs->tpc);
5af47db7 1692 print_symbol(KERN_EMERG "TPC<%s>\n", regs->tpc);
1da177e4
LT
1693 panic("Irrecoverable Cheetah+ parity error.");
1694 }
1695
1696 printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1697 smp_processor_id(),
1698 (type & 0x1) ? 'I' : 'D',
1699 regs->tpc);
5af47db7 1700 print_symbol(KERN_WARNING "TPC<%s>\n", regs->tpc);
1da177e4
LT
1701}
1702
5b0c0572
DM
1703struct sun4v_error_entry {
1704 u64 err_handle;
1705 u64 err_stick;
1706
1707 u32 err_type;
1708#define SUN4V_ERR_TYPE_UNDEFINED 0
1709#define SUN4V_ERR_TYPE_UNCORRECTED_RES 1
1710#define SUN4V_ERR_TYPE_PRECISE_NONRES 2
1711#define SUN4V_ERR_TYPE_DEFERRED_NONRES 3
1712#define SUN4V_ERR_TYPE_WARNING_RES 4
1713
1714 u32 err_attrs;
1715#define SUN4V_ERR_ATTRS_PROCESSOR 0x00000001
1716#define SUN4V_ERR_ATTRS_MEMORY 0x00000002
1717#define SUN4V_ERR_ATTRS_PIO 0x00000004
1718#define SUN4V_ERR_ATTRS_INT_REGISTERS 0x00000008
1719#define SUN4V_ERR_ATTRS_FPU_REGISTERS 0x00000010
1720#define SUN4V_ERR_ATTRS_USER_MODE 0x01000000
1721#define SUN4V_ERR_ATTRS_PRIV_MODE 0x02000000
1722#define SUN4V_ERR_ATTRS_RES_QUEUE_FULL 0x80000000
1723
1724 u64 err_raddr;
1725 u32 err_size;
1726 u16 err_cpu;
1727 u16 err_pad;
1728};
1729
1730static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
1731static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
1732
1733static const char *sun4v_err_type_to_str(u32 type)
1734{
1735 switch (type) {
1736 case SUN4V_ERR_TYPE_UNDEFINED:
1737 return "undefined";
1738 case SUN4V_ERR_TYPE_UNCORRECTED_RES:
1739 return "uncorrected resumable";
1740 case SUN4V_ERR_TYPE_PRECISE_NONRES:
1741 return "precise nonresumable";
1742 case SUN4V_ERR_TYPE_DEFERRED_NONRES:
1743 return "deferred nonresumable";
1744 case SUN4V_ERR_TYPE_WARNING_RES:
1745 return "warning resumable";
1746 default:
1747 return "unknown";
1748 };
1749}
1750
5224e6cc 1751static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
5b0c0572
DM
1752{
1753 int cnt;
1754
1755 printk("%s: Reporting on cpu %d\n", pfx, cpu);
1756 printk("%s: err_handle[%lx] err_stick[%lx] err_type[%08x:%s]\n",
1757 pfx,
1758 ent->err_handle, ent->err_stick,
1759 ent->err_type,
1760 sun4v_err_type_to_str(ent->err_type));
1761 printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n",
1762 pfx,
1763 ent->err_attrs,
1764 ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ?
1765 "processor" : ""),
1766 ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ?
1767 "memory" : ""),
1768 ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ?
1769 "pio" : ""),
1770 ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ?
1771 "integer-regs" : ""),
1772 ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ?
1773 "fpu-regs" : ""),
1774 ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ?
1775 "user" : ""),
1776 ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ?
1777 "privileged" : ""),
1778 ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ?
1779 "queue-full" : ""));
1780 printk("%s: err_raddr[%016lx] err_size[%u] err_cpu[%u]\n",
1781 pfx,
1782 ent->err_raddr, ent->err_size, ent->err_cpu);
1783
5224e6cc
DM
1784 __show_regs(regs);
1785
5b0c0572
DM
1786 if ((cnt = atomic_read(ocnt)) != 0) {
1787 atomic_set(ocnt, 0);
1788 wmb();
1789 printk("%s: Queue overflowed %d times.\n",
1790 pfx, cnt);
1791 }
1792}
1793
1794/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
1795 * Log the event and clear the first word of the entry.
1796 */
1797void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
1798{
1799 struct sun4v_error_entry *ent, local_copy;
1800 struct trap_per_cpu *tb;
1801 unsigned long paddr;
1802 int cpu;
1803
1804 cpu = get_cpu();
1805
1806 tb = &trap_block[cpu];
1807 paddr = tb->resum_kernel_buf_pa + offset;
1808 ent = __va(paddr);
1809
1810 memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
1811
1812 /* We have a local copy now, so release the entry. */
1813 ent->err_handle = 0;
1814 wmb();
1815
1816 put_cpu();
1817
a2c1e064
DM
1818 if (ent->err_type == SUN4V_ERR_TYPE_WARNING_RES) {
1819 /* If err_type is 0x4, it's a powerdown request. Do
1820 * not do the usual resumable error log because that
1821 * makes it look like some abnormal error.
1822 */
1823 printk(KERN_INFO "Power down request...\n");
1824 kill_cad_pid(SIGINT, 1);
1825 return;
1826 }
1827
5224e6cc 1828 sun4v_log_error(regs, &local_copy, cpu,
5b0c0572
DM
1829 KERN_ERR "RESUMABLE ERROR",
1830 &sun4v_resum_oflow_cnt);
1831}
1832
1833/* If we try to printk() we'll probably make matters worse, by trying
1834 * to retake locks this cpu already holds or causing more errors. So
1835 * just bump a counter, and we'll report these counter bumps above.
1836 */
1837void sun4v_resum_overflow(struct pt_regs *regs)
1838{
1839 atomic_inc(&sun4v_resum_oflow_cnt);
1840}
1841
1842/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
1843 * Log the event, clear the first word of the entry, and die.
1844 */
1845void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
1846{
1847 struct sun4v_error_entry *ent, local_copy;
1848 struct trap_per_cpu *tb;
1849 unsigned long paddr;
1850 int cpu;
1851
1852 cpu = get_cpu();
1853
1854 tb = &trap_block[cpu];
1855 paddr = tb->nonresum_kernel_buf_pa + offset;
1856 ent = __va(paddr);
1857
1858 memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
1859
1860 /* We have a local copy now, so release the entry. */
1861 ent->err_handle = 0;
1862 wmb();
1863
1864 put_cpu();
1865
1866#ifdef CONFIG_PCI
1867 /* Check for the special PCI poke sequence. */
1868 if (pci_poke_in_progress && pci_poke_cpu == cpu) {
1869 pci_poke_faulted = 1;
1870 regs->tpc += 4;
1871 regs->tnpc = regs->tpc + 4;
1872 return;
1873 }
1874#endif
1875
5224e6cc 1876 sun4v_log_error(regs, &local_copy, cpu,
5b0c0572
DM
1877 KERN_EMERG "NON-RESUMABLE ERROR",
1878 &sun4v_nonresum_oflow_cnt);
1879
1880 panic("Non-resumable error.");
1881}
1882
1883/* If we try to printk() we'll probably make matters worse, by trying
1884 * to retake locks this cpu already holds or causing more errors. So
1885 * just bump a counter, and we'll report these counter bumps above.
1886 */
1887void sun4v_nonresum_overflow(struct pt_regs *regs)
1888{
1889 /* XXX Actually even this can make not that much sense. Perhaps
1890 * XXX we should just pull the plug and panic directly from here?
1891 */
1892 atomic_inc(&sun4v_nonresum_oflow_cnt);
1893}
1894
6c8927c9
DM
1895unsigned long sun4v_err_itlb_vaddr;
1896unsigned long sun4v_err_itlb_ctx;
1897unsigned long sun4v_err_itlb_pte;
1898unsigned long sun4v_err_itlb_error;
1899
1900void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
1901{
1902 if (tl > 1)
1903 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1904
04d74758
DM
1905 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
1906 regs->tpc, tl);
5af47db7 1907 print_symbol(KERN_EMERG "SUN4V-ITLB: TPC<%s>\n", regs->tpc);
6320bceb
DM
1908 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
1909 print_symbol(KERN_EMERG "SUN4V-ITLB: O7<%s>\n", regs->u_regs[UREG_I7]);
04d74758
DM
1910 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
1911 "pte[%lx] error[%lx]\n",
6c8927c9
DM
1912 sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
1913 sun4v_err_itlb_pte, sun4v_err_itlb_error);
04d74758 1914
6c8927c9
DM
1915 prom_halt();
1916}
1917
/* DTLB error state; presumably filled in by the sun4v DTLB trap entry
 * code before calling sun4v_dtlb_error_report() — TODO confirm against
 * the assembler trap path.
 */
 1918unsigned long sun4v_err_dtlb_vaddr;
 1919unsigned long sun4v_err_dtlb_ctx;
 1920unsigned long sun4v_err_dtlb_pte;
 1921unsigned long sun4v_err_dtlb_error;
 1922
/* Report a fatal sun4v data-TLB error and halt into the PROM.
 * Mirrors sun4v_itlb_error_report() for the data side.
 */
 1923void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
 1924{
	/* At tl > 1 a full trap-stack log sits just past pt_regs. */
 1925	if (tl > 1)
 1926		dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 1927
 04d74758
 DM
 1928	printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
 1929	       regs->tpc, tl);
 5af47db7 1930	print_symbol(KERN_EMERG "SUN4V-DTLB: TPC<%s>\n", regs->tpc);
 6320bceb
 DM
 1931	printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
 1932	print_symbol(KERN_EMERG "SUN4V-DTLB: O7<%s>\n", regs->u_regs[UREG_I7]);
 04d74758
 DM
 1933	printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
 1934	       "pte[%lx] error[%lx]\n",
 6c8927c9
 DM
 1935	       sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
 1936	       sun4v_err_dtlb_pte, sun4v_err_dtlb_error);
 04d74758 1937
 6c8927c9
 DM
	/* Unrecoverable: drop into the firmware, never returns. */
 1938	prom_halt();
 1939}
1940
 2a3a5f5d
 DM
/* Log a failed sun4v hypervisor TLB call: raw error code and op number. */
 1941void hypervisor_tlbop_error(unsigned long err, unsigned long op)
 1942{
 1943	printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n",
 1944	       err, op);
 1945}
 1946
/* Same as above, for TLB hypervisor calls made from a cross-call path. */
 1947void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
 1948{
 1949	printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n",
 1950	       err, op);
 1951}
1952
 1da177e4
 LT
/* Common floating-point exception disposition.  In privileged (kernel)
 * mode the faulting instruction is simply skipped; in user mode the
 * FSR is decoded into an si_code and SIGFPE is delivered.
 */
 1953void do_fpe_common(struct pt_regs *regs)
 1954{
 1955	if (regs->tstate & TSTATE_PRIV) {
		/* Kernel fault: step past the instruction and continue. */
 1956		regs->tpc = regs->tnpc;
 1957		regs->tnpc += 4;
 1958	} else {
 1959		unsigned long fsr = current_thread_info()->xfsr[0];
 1960		siginfo_t info;
 1961
		/* 32-bit tasks only see the low 32 bits of PC/NPC. */
 1962		if (test_thread_flag(TIF_32BIT)) {
 1963			regs->tpc &= 0xffffffff;
 1964			regs->tnpc &= 0xffffffff;
 1965		}
 1966		info.si_signo = SIGFPE;
 1967		info.si_errno = 0;
 1968		info.si_addr = (void __user *)regs->tpc;
 1969		info.si_trapno = 0;
 1970		info.si_code = __SI_FAULT;
		/* FSR bits 14-16 hold the trap type; value 1 appears to be
		 * the IEEE-754 exception case — then the low exception bits
		 * select a specific si_code (invalid/overflow/underflow/
		 * divide/inexact).
		 */
 1971		if ((fsr & 0x1c000) == (1 << 14)) {
 1972			if (fsr & 0x10)
 1973				info.si_code = FPE_FLTINV;
 1974			else if (fsr & 0x08)
 1975				info.si_code = FPE_FLTOVF;
 1976			else if (fsr & 0x04)
 1977				info.si_code = FPE_FLTUND;
 1978			else if (fsr & 0x02)
 1979				info.si_code = FPE_FLTDIV;
 1980			else if (fsr & 0x01)
 1981				info.si_code = FPE_FLTRES;
 1982		}
 1983		force_sig_info(SIGFPE, &info, current);
 1984	}
 1985}
1986
/* IEEE-754 FP exception trap (trap type 0x24): give die-chain
 * listeners a chance, then use the common FP exception path.
 */
 1987void do_fpieee(struct pt_regs *regs)
 1988{
 1989	if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
 1990		       0, 0x24, SIGFPE) == NOTIFY_STOP)
 1991		return;
 1992
 1993	do_fpe_common(regs);
 1994}
1995
 1996extern int do_mathemu(struct pt_regs *, struct fpustate *);
 1997
/* "FP exception other" trap (trap type 0x25).  For unfinished or
 * unimplemented FPops, attempt software emulation via do_mathemu();
 * if that handles it, we are done, otherwise fall through to the
 * common FP exception disposition.
 */
 1998void do_fpother(struct pt_regs *regs)
 1999{
 2000	struct fpustate *f = FPUSTATE;
 2001	int ret = 0;
 2002
 2003	if (notify_die(DIE_TRAP, "fpu exception other", regs,
 2004		       0, 0x25, SIGFPE) == NOTIFY_STOP)
 2005		return;
 2006
	/* Dispatch on the FSR trap-type field (bits 14-16). */
 2007	switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
 2008	case (2 << 14): /* unfinished_FPop */
 2009	case (3 << 14): /* unimplemented_FPop */
 2010		ret = do_mathemu(regs, f);
 2011		break;
 2012	}
 2013	if (ret)
 2014		return;
 2015	do_fpe_common(regs);
 2016}
2017
/* Tagged arithmetic overflow trap (trap type 0x26).  Fatal in kernel
 * mode; user mode gets SIGEMT with EMT_TAGOVF.
 */
 2018void do_tof(struct pt_regs *regs)
 2019{
 2020	siginfo_t info;
 2021
 2022	if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
 2023		       0, 0x26, SIGEMT) == NOTIFY_STOP)
 2024		return;
 2025
 2026	if (regs->tstate & TSTATE_PRIV)
 2027		die_if_kernel("Penguin overflow trap from kernel mode", regs);
	/* 32-bit tasks only see the low 32 bits of PC/NPC. */
 2028	if (test_thread_flag(TIF_32BIT)) {
 2029		regs->tpc &= 0xffffffff;
 2030		regs->tnpc &= 0xffffffff;
 2031	}
 2032	info.si_signo = SIGEMT;
 2033	info.si_errno = 0;
 2034	info.si_code = EMT_TAGOVF;
 2035	info.si_addr = (void __user *)regs->tpc;
 2036	info.si_trapno = 0;
 2037	force_sig_info(SIGEMT, &info, current);
 2038}
2039
/* Integer divide-by-zero trap (trap type 0x28).  Fatal in kernel mode;
 * user mode gets SIGFPE with FPE_INTDIV.
 */
 2040void do_div0(struct pt_regs *regs)
 2041{
 2042	siginfo_t info;
 2043
 2044	if (notify_die(DIE_TRAP, "integer division by zero", regs,
 2045		       0, 0x28, SIGFPE) == NOTIFY_STOP)
 2046		return;
 2047
 2048	if (regs->tstate & TSTATE_PRIV)
 2049		die_if_kernel("TL0: Kernel divide by zero.", regs);
	/* 32-bit tasks only see the low 32 bits of PC/NPC. */
 2050	if (test_thread_flag(TIF_32BIT)) {
 2051		regs->tpc &= 0xffffffff;
 2052		regs->tnpc &= 0xffffffff;
 2053	}
 2054	info.si_signo = SIGFPE;
 2055	info.si_errno = 0;
 2056	info.si_code = FPE_INTDIV;
 2057	info.si_addr = (void __user *)regs->tpc;
 2058	info.si_trapno = 0;
 2059	force_sig_info(SIGFPE, &info, current);
 2060}
2061
/* Dump 9 instruction words around a kernel PC (3 before, the faulting
 * one, 5 after); the faulting word (i == 0) is bracketed with <>.
 * Bails out silently if the PC is not 4-byte aligned.
 */
 99cd2201 2062static void instruction_dump(unsigned int *pc)
 1da177e4
 LT
 2063{
 2064	int i;
 2065
 2066	if ((((unsigned long) pc) & 3))
 2067		return;
 2068
 2069	printk("Instruction DUMP:");
 2070	for (i = -3; i < 6; i++)
 2071		printk("%c%08x%c",i?' ':'<',pc[i],i?' ':'>');
 2072	printk("\n");
 2073}
2074
/* Dump 9 instruction words around a user PC, copied safely from
 * userspace.  Bails out silently on misalignment or copy failure.
 *
 * NOTE(review): the bracket polarity is inverted relative to
 * instruction_dump() — here every word EXCEPT the faulting one
 * (buf[3]) is bracketed with <>.  Looks unintended; confirm before
 * changing since this matches the historical mainline behavior.
 */
 99cd2201 2075static void user_instruction_dump(unsigned int __user *pc)
 1da177e4
 LT
 2076{
 2077	int i;
 2078	unsigned int buf[9];
 2079
 2080	if ((((unsigned long) pc) & 3))
 2081		return;
 2082
	/* pc - 3: start three words before the faulting instruction. */
 2083	if (copy_from_user(buf, pc - 3, sizeof(buf)))
 2084		return;
 2085
 2086	printk("Instruction DUMP:");
 2087	for (i = 0; i < 9; i++)
 2088		printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>');
 2089	printk("\n");
 2090}
2091
/* Print a call trace for @tsk by walking saved register windows on its
 * kernel stack.  @_ksp may be NULL, in which case the frame pointer is
 * taken from the live %fp (current task) or the saved ksp (other task).
 * Walks at most 16 frames and stops at any frame pointer that falls
 * outside the task's thread area.
 */
 2092void show_stack(struct task_struct *tsk, unsigned long *_ksp)
 2093{
 2094	unsigned long pc, fp, thread_base, ksp;
 c1f193a7 2095	struct thread_info *tp;
 1da177e4
 LT
 2096	struct reg_window *rw;
 2097	int count = 0;
 2098
 2099	ksp = (unsigned long) _ksp;
 c1f193a7
 DM
 2100	if (!tsk)
 2101		tsk = current;
 2102	tp = task_thread_info(tsk);
 2103	if (ksp == 0UL) {
 2104		if (tsk == current)
 2105			asm("mov %%fp, %0" : "=r" (ksp));
 2106		else
 2107			ksp = tp->ksp;
 2108	}
 1da177e4
 LT
	/* Spill live register windows to the stack so we can read them. */
 2109	if (tp == current_thread_info())
 2110		flushw_all();
 2111
	/* Stack pointers are biased on sparc64; un-bias to get the
	 * real frame address.
	 */
 2112	fp = ksp + STACK_BIAS;
 2113	thread_base = (unsigned long) tp;
 2114
 2115	printk("Call Trace:");
 2116#ifdef CONFIG_KALLSYMS
 2117	printk("\n");
 2118#endif
 2119	do {
 2120		/* Bogus frame pointer? */
 2121		if (fp < (thread_base + sizeof(struct thread_info)) ||
 2122		    fp >= (thread_base + THREAD_SIZE))
 2123			break;
 2124		rw = (struct reg_window *)fp;
		/* ins[7] is the saved return address, ins[6] the caller fp. */
 2125		pc = rw->ins[7];
 2126		printk(" [%016lx] ", pc);
 2127		print_symbol("%s\n", pc);
 2128		fp = rw->ins[6] + STACK_BIAS;
 2129	} while (++count < 16);
 2130#ifndef CONFIG_KALLSYMS
 2131	printk("\n");
 2132#endif
 2133}
2134
/* Convenience wrapper: backtrace the current task from the live %fp. */
 2135void dump_stack(void)
 2136{
 c1f193a7 2137	show_stack(current, NULL);
 1da177e4
 LT
 2138}
 2139
 2140EXPORT_SYMBOL(dump_stack);
2141
/* Return 1 if @rw is a plausible register window on @task's kernel
 * stack: 8-byte aligned and inside the task's thread_union.  Addresses
 * below PAGE_OFFSET are rejected for everything except init_task.
 */
 2142static inline int is_kernel_stack(struct task_struct *task,
 2143				  struct reg_window *rw)
 2144{
 2145	unsigned long rw_addr = (unsigned long) rw;
 2146	unsigned long thread_base, thread_end;
 2147
 2148	if (rw_addr < PAGE_OFFSET) {
 2149		if (task != &init_task)
 2150			return 0;
 2151	}
 2152
 ee3eea16 2153	thread_base = (unsigned long) task_stack_page(task);
 1da177e4
 LT
 2154	thread_end = thread_base + sizeof(union thread_union);
 2155	if (rw_addr >= thread_base &&
 2156	    rw_addr < thread_end &&
 2157	    !(rw_addr & 0x7UL))
 2158		return 1;
 2159
 2160	return 0;
 2161}
2162
/* Step one frame up the stack: follow the saved frame pointer
 * (ins[6], stored biased) to the caller's register window, or return
 * NULL at the end of the chain.
 */
 2163static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
 2164{
 2165	unsigned long fp = rw->ins[6];
 2166
 2167	if (!fp)
 2168		return NULL;
 2169
 2170	return (struct reg_window *) (fp + STACK_BIAS);
 2171}
2172
/* Oops handler.  Prints the banner, registers, a (kernel-mode) caller
 * backtrace and an instruction dump, then kills the task: SIGKILL if
 * the trap came from privileged mode, SIGSEGV otherwise.  Despite the
 * name, it never returns in either case.
 */
 2173void die_if_kernel(char *str, struct pt_regs *regs)
 2174{
 2175	static int die_counter;
 1da177e4
 LT
 2176	extern void smp_report_regs(void);
 2177	int count = 0;
 2178
 2179	/* Amuse the user. */
 2180	printk(
 2181"              \\|/ ____ \\|/\n"
 2182"              \"@'/ .. \\`@\"\n"
 2183"              /_| \\__/ |_\\\n"
 2184"                 \\__U_/\n");
 2185
 19c5870c 2186	printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);
 1da177e4
 LT
 2187	notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
	/* Spill register windows so the saved frames are on the stack. */
 2188	__asm__ __volatile__("flushw");
 2189	__show_regs(regs);
 bcdcd8e7 2190	add_taint(TAINT_DIE);
 1da177e4
 LT
 2191	if (regs->tstate & TSTATE_PRIV) {
 2192		struct reg_window *rw = (struct reg_window *)
 2193			(regs->u_regs[UREG_FP] + STACK_BIAS);
 2194
 2195		/* Stop the back trace when we hit userland or we
 2196		 * find some badly aligned kernel stack.
 2197		 */
 2198		while (rw &&
 2199		       count++ < 30&&
 2200		       is_kernel_stack(current, rw)) {
 2201			printk("Caller[%016lx]", rw->ins[7]);
 2202			print_symbol(": %s", rw->ins[7]);
 2203			printk("\n");
 2204
 2205			rw = kernel_stack_up(rw);
 2206		}
 2207		instruction_dump ((unsigned int *) regs->tpc);
 2208	} else {
		/* 32-bit tasks only see the low 32 bits of PC/NPC. */
 2209		if (test_thread_flag(TIF_32BIT)) {
 2210			regs->tpc &= 0xffffffff;
 2211			regs->tnpc &= 0xffffffff;
 2212		}
 2213		user_instruction_dump ((unsigned int __user *) regs->tpc);
 2214	}
 37133c00 2215#if 0
 1da177e4
 LT
 2216#ifdef CONFIG_SMP
 2217	smp_report_regs();
 2218#endif
 37133c00 2219#endif
 1da177e4
 LT
 2220	if (regs->tstate & TSTATE_PRIV)
 2221		do_exit(SIGKILL);
 2222	do_exit(SIGSEGV);
 2223}
2224
 6e7726e1
 DM
/* Match the op/op3 fields that identify VIS instructions (op == 2,
 * op3 == 0x36) so they can be emulated on sun4v.
 */
 2225#define VIS_OPCODE_MASK	((0x3 << 30) | (0x3f << 19))
 2226#define VIS_OPCODE_VAL	((0x2 << 30) | (0x36 << 19))
 2227
 1da177e4
 LT
 2228extern int handle_popc(u32 insn, struct pt_regs *regs);
 2229extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
 6e7726e1 2230extern int vis_emul(struct pt_regs *, unsigned int);
 1da177e4
 LT
 2231
/* Illegal-instruction trap (trap type 0x10).  Fatal in kernel mode.
 * For user mode, try to emulate POPC, LDQ/STQ, and (on hypervisor
 * systems) VIS or other FP instructions; anything unhandled gets
 * SIGILL with ILL_ILLOPC.
 */
 2232void do_illegal_instruction(struct pt_regs *regs)
 2233{
 2234	unsigned long pc = regs->tpc;
 2235	unsigned long tstate = regs->tstate;
 2236	u32 insn;
 2237	siginfo_t info;
 2238
 2239	if (notify_die(DIE_TRAP, "illegal instruction", regs,
 2240		       0, 0x10, SIGILL) == NOTIFY_STOP)
 2241		return;
 2242
 2243	if (tstate & TSTATE_PRIV)
 2244		die_if_kernel("Kernel illegal instruction", regs);
 2245	if (test_thread_flag(TIF_32BIT))
 2246		pc = (u32)pc;
	/* Fetch the faulting instruction from user space for emulation. */
 2247	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
 2248		if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
 2249			if (handle_popc(insn, regs))
 2250				return;
 2251		} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
 2252			if (handle_ldf_stq(insn, regs))
 2253				return;
 0c51ed93 2254		} else if (tlb_type == hypervisor) {
 6e7726e1
 DM
 2255			if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) {
 2256				if (!vis_emul(regs, insn))
 2257					return;
 2258			} else {
 2259				struct fpustate *f = FPUSTATE;
 0c51ed93 2260
 6e7726e1
 DM
 2261				/* XXX maybe verify XFSR bits like
 2262				 * XXX do_fpother() does?
 2263				 */
 2264				if (do_mathemu(regs, f))
 2265					return;
 2266			}
 1da177e4
 LT
 2267		}
 2268	}
 2269	info.si_signo = SIGILL;
 2270	info.si_errno = 0;
 2271	info.si_code = ILL_ILLOPC;
 2272	info.si_addr = (void __user *)pc;
 2273	info.si_trapno = 0;
 2274	force_sig_info(SIGILL, &info, current);
 2275}
2276
 ed6b0b45
 DM
 2277extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
 2278
/* Memory-address-not-aligned trap (trap type 0x34) on sun4u.
 * Kernel-mode faults are fixed up by the unaligned-access emulator;
 * user mode gets SIGBUS/BUS_ADRALN at the faulting address (@sfar).
 * @sfsr is the fault status register value, currently unused here.
 */
 1da177e4
 LT
 2279void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
 2280{
 2281	siginfo_t info;
 2282
 2283	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
 2284		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
 2285		return;
 2286
 2287	if (regs->tstate & TSTATE_PRIV) {
 ed6b0b45 2288		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
 1da177e4
 LT
 2289		return;
 2290	}
 2291	info.si_signo = SIGBUS;
 2292	info.si_errno = 0;
 2293	info.si_code = BUS_ADRALN;
 2294	info.si_addr = (void __user *)sfar;
 2295	info.si_trapno = 0;
 2296	force_sig_info(SIGBUS, &info, current);
 2297}
2298
/* sun4v variant of the unaligned-access trap.  Identical disposition
 * to mem_address_unaligned(), but takes the fault address and
 * type/context word as reported by the hypervisor (@type_ctx is
 * currently unused here).
 */
 9f8a5b84 2299void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
 ed6b0b45
 DM
 2300{
 2301	siginfo_t info;
 2302
 2303	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
 2304		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
 2305		return;
 2306
 2307	if (regs->tstate & TSTATE_PRIV) {
 2308		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
 2309		return;
 2310	}
 2311	info.si_signo = SIGBUS;
 2312	info.si_errno = 0;
 2313	info.si_code = BUS_ADRALN;
 2314	info.si_addr = (void __user *) addr;
 2315	info.si_trapno = 0;
 2316	force_sig_info(SIGBUS, &info, current);
 2317}
2318
 1da177e4
 LT
/* Privileged-opcode trap (trap type 0x11): a user task executed a
 * privileged instruction.  Delivers SIGILL with ILL_PRVOPC.
 */
 2319void do_privop(struct pt_regs *regs)
 2320{
 2321	siginfo_t info;
 2322
 2323	if (notify_die(DIE_TRAP, "privileged operation", regs,
 2324		       0, 0x11, SIGILL) == NOTIFY_STOP)
 2325		return;
 2326
	/* 32-bit tasks only see the low 32 bits of PC/NPC. */
 2327	if (test_thread_flag(TIF_32BIT)) {
 2328		regs->tpc &= 0xffffffff;
 2329		regs->tnpc &= 0xffffffff;
 2330	}
 2331	info.si_signo = SIGILL;
 2332	info.si_errno = 0;
 2333	info.si_code = ILL_PRVOPC;
 2334	info.si_addr = (void __user *)regs->tpc;
 2335	info.si_trapno = 0;
 2336	force_sig_info(SIGILL, &info, current);
 2337}
 2338
/* Privileged-action trap: same disposition as a privileged opcode. */
 2339void do_privact(struct pt_regs *regs)
 2340{
 2341	do_privop(regs);
 2342}
2343
 2344/* Trap level 1 stuff or other traps we should never see... */
/* Each handler below is uniform: for TL1 traps, dump the trap-stack
 * log recorded just past pt_regs, then oops via die_if_kernel() with
 * a message naming the trap.  None of these return.
 */
 2345void do_cee(struct pt_regs *regs)
 2346{
 2347	die_if_kernel("TL0: Cache Error Exception", regs);
 2348}
 2349
 2350void do_cee_tl1(struct pt_regs *regs)
 2351{
 2352	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 2353	die_if_kernel("TL1: Cache Error Exception", regs);
 2354}
 2355
 2356void do_dae_tl1(struct pt_regs *regs)
 2357{
 2358	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 2359	die_if_kernel("TL1: Data Access Exception", regs);
 2360}
 2361
 2362void do_iae_tl1(struct pt_regs *regs)
 2363{
 2364	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 2365	die_if_kernel("TL1: Instruction Access Exception", regs);
 2366}
 2367
 2368void do_div0_tl1(struct pt_regs *regs)
 2369{
 2370	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 2371	die_if_kernel("TL1: DIV0 Exception", regs);
 2372}
 2373
 2374void do_fpdis_tl1(struct pt_regs *regs)
 2375{
 2376	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 2377	die_if_kernel("TL1: FPU Disabled", regs);
 2378}
 2379
 2380void do_fpieee_tl1(struct pt_regs *regs)
 2381{
 2382	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 2383	die_if_kernel("TL1: FPU IEEE Exception", regs);
 2384}
 2385
 2386void do_fpother_tl1(struct pt_regs *regs)
 2387{
 2388	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 2389	die_if_kernel("TL1: FPU Other Exception", regs);
 2390}
 2391
 2392void do_ill_tl1(struct pt_regs *regs)
 2393{
 2394	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 2395	die_if_kernel("TL1: Illegal Instruction Exception", regs);
 2396}
 2397
 2398void do_irq_tl1(struct pt_regs *regs)
 2399{
 2400	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 2401	die_if_kernel("TL1: IRQ Exception", regs);
 2402}
 2403
 2404void do_lddfmna_tl1(struct pt_regs *regs)
 2405{
 2406	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 2407	die_if_kernel("TL1: LDDF Exception", regs);
 2408}
 2409
 2410void do_stdfmna_tl1(struct pt_regs *regs)
 2411{
 2412	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 2413	die_if_kernel("TL1: STDF Exception", regs);
 2414}
 2415
 2416void do_paw(struct pt_regs *regs)
 2417{
 2418	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
 2419}
 2420
 2421void do_paw_tl1(struct pt_regs *regs)
 2422{
 2423	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 2424	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
 2425}
 2426
 2427void do_vaw(struct pt_regs *regs)
 2428{
 2429	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
 2430}
 2431
 2432void do_vaw_tl1(struct pt_regs *regs)
 2433{
 2434	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 2435	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
 2436}
 2437
 2438void do_tof_tl1(struct pt_regs *regs)
 2439{
 2440	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 2441	die_if_kernel("TL1: Tag Overflow Exception", regs);
 2442}
2443
/* Emulate a "read %psr" for compat tasks: synthesize a 32-bit PSR
 * value from the 64-bit tstate into %o0, then step past the trapping
 * instruction.
 */
 2444void do_getpsr(struct pt_regs *regs)
 2445{
 2446	regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
 2447	regs->tpc = regs->tnpc;
 2448	regs->tnpc += 4;
	/* 32-bit tasks only see the low 32 bits of PC/NPC. */
 2449	if (test_thread_flag(TIF_32BIT)) {
 2450		regs->tpc &= 0xffffffff;
 2451		regs->tnpc &= 0xffffffff;
 2452	}
 2453}
2454
 56fb4df6
 DM
/* Per-cpu trap state blocks, indexed by cpu number. */
 2455struct trap_per_cpu trap_block[NR_CPUS];
 2456
 2457/* This can get invoked before sched_init() so play it super safe
 2458 * and use hard_smp_processor_id().
 2459 */
 72aff53f 2460void init_cur_cpu_trap(struct thread_info *t)
 56fb4df6
 DM
 2461{
 2462	int cpu = hard_smp_processor_id();
 2463	struct trap_per_cpu *p = &trap_block[cpu];
 2464
	/* Record this cpu's thread_info and clear its pgd physaddr. */
 72aff53f 2465	p->thread = t;
 56fb4df6
 DM
 2466	p->pgd_paddr = 0;
 2467}
2468
/* These are deliberately-undefined-sounding functions called only when
 * an assembler offset macro disagrees with the corresponding C struct
 * layout, turning a layout mismatch into an obvious runtime failure.
 */
 1da177e4 2469extern void thread_info_offsets_are_bolixed_dave(void);
 56fb4df6 2470extern void trap_per_cpu_offsets_are_bolixed_dave(void);
 dcc1e8dd 2471extern void tsb_config_offsets_are_bolixed_dave(void);
 1da177e4
 LT
 2472
 2473/* Only invoked on boot processor. */
 2474void __init trap_init(void)
 2475{
 2476	/* Compile time sanity check. */
	/* Every TI_* assembler constant must match the C offsetof() for
	 * struct thread_info, and fpregs must stay 64-byte aligned.
	 */
 2477	if (TI_TASK != offsetof(struct thread_info, task) ||
 2478	    TI_FLAGS != offsetof(struct thread_info, flags) ||
 2479	    TI_CPU != offsetof(struct thread_info, cpu) ||
 2480	    TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
 2481	    TI_KSP != offsetof(struct thread_info, ksp) ||
 2482	    TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
 2483	    TI_KREGS != offsetof(struct thread_info, kregs) ||
 2484	    TI_UTRAPS != offsetof(struct thread_info, utraps) ||
 2485	    TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
 2486	    TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
 2487	    TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
 2488	    TI_GSR != offsetof(struct thread_info, gsr) ||
 2489	    TI_XFSR != offsetof(struct thread_info, xfsr) ||
 2490	    TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
 2491	    TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
 2492	    TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
 2493	    TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
 2494	    TI_PCR != offsetof(struct thread_info, pcr_reg) ||
 1da177e4 2495	    TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
 db7d9a4e
 DM
 2496	    TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
 2497	    TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
 a3f99858
 DM
 2498	    TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
 2499	    TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
 2500	    TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
 1da177e4
 LT
 2501	    TI_FPREGS != offsetof(struct thread_info, fpregs) ||
 2502	    (TI_FPREGS & (64 - 1)))
 2503		thread_info_offsets_are_bolixed_dave();
 2504
	/* Same check for struct trap_per_cpu. */
 56fb4df6 2505	if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) ||
 e088ad7c
 DM
 2506	    (TRAP_PER_CPU_PGD_PADDR !=
 2507	     offsetof(struct trap_per_cpu, pgd_paddr)) ||
 2508	    (TRAP_PER_CPU_CPU_MONDO_PA !=
 2509	     offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
 2510	    (TRAP_PER_CPU_DEV_MONDO_PA !=
 2511	     offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
 2512	    (TRAP_PER_CPU_RESUM_MONDO_PA !=
 2513	     offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
 5b0c0572
 DM
 2514	    (TRAP_PER_CPU_RESUM_KBUF_PA !=
 2515	     offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
 e088ad7c
 DM
 2516	    (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
 2517	     offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
 5b0c0572
 DM
 2518	    (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
 2519	     offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
 e088ad7c 2520	    (TRAP_PER_CPU_FAULT_INFO !=
 1d2f1f90
 DM
 2521	     offsetof(struct trap_per_cpu, fault_info)) ||
 2522	    (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
 2523	     offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
 2524	    (TRAP_PER_CPU_CPU_LIST_PA !=
 dcc1e8dd
 DM
 2525	     offsetof(struct trap_per_cpu, cpu_list_pa)) ||
 2526	    (TRAP_PER_CPU_TSB_HUGE !=
 2527	     offsetof(struct trap_per_cpu, tsb_huge)) ||
 2528	    (TRAP_PER_CPU_TSB_HUGE_TEMP !=
 fd0504c3 2529	     offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
 eb2d8d60
 DM
 2530	    (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
 2531	     offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
 5cbc3073
 DM
 2532	    (TRAP_PER_CPU_CPU_MONDO_QMASK !=
 2533	     offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
 2534	    (TRAP_PER_CPU_DEV_MONDO_QMASK !=
 2535	     offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
 2536	    (TRAP_PER_CPU_RESUM_QMASK !=
 2537	     offsetof(struct trap_per_cpu, resum_qmask)) ||
 2538	    (TRAP_PER_CPU_NONRESUM_QMASK !=
 2539	     offsetof(struct trap_per_cpu, nonresum_qmask)))
 56fb4df6
 DM
 2540		trap_per_cpu_offsets_are_bolixed_dave();
 2541
	/* Same check for struct tsb_config. */
 dcc1e8dd
 DM
 2542	if ((TSB_CONFIG_TSB !=
 2543	     offsetof(struct tsb_config, tsb)) ||
 2544	    (TSB_CONFIG_RSS_LIMIT !=
 2545	     offsetof(struct tsb_config, tsb_rss_limit)) ||
 2546	    (TSB_CONFIG_NENTRIES !=
 2547	     offsetof(struct tsb_config, tsb_nentries)) ||
 2548	    (TSB_CONFIG_REG_VAL !=
 2549	     offsetof(struct tsb_config, tsb_reg_val)) ||
 2550	    (TSB_CONFIG_MAP_VADDR !=
 2551	     offsetof(struct tsb_config, tsb_map_vaddr)) ||
 2552	    (TSB_CONFIG_MAP_PTE !=
 2553	     offsetof(struct tsb_config, tsb_map_pte)))
 2554		tsb_config_offsets_are_bolixed_dave();
 2555
 1da177e4
 LT
 2556	/* Attach to the address space of init_task.  On SMP we
 2557	 * do this in smp.c:smp_callin for other cpus.
 2558	 */
 2559	atomic_inc(&init_mm.mm_count);
 2560	current->active_mm = &init_mm;
 2561}