[SPARC64]: Convert to use generic exception table support.
1 /* $Id: traps.c,v 1.85 2002/02/09 19:49:31 davem Exp $
2 * arch/sparc64/kernel/traps.c
3 *
4 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
6 */
7
8 /*
9 * I like traps on v9, :))))
10 */
11
12 #include <linux/config.h>
13 #include <linux/module.h>
14 #include <linux/sched.h> /* for jiffies */
15 #include <linux/kernel.h>
16 #include <linux/kallsyms.h>
17 #include <linux/signal.h>
18 #include <linux/smp.h>
19 #include <linux/smp_lock.h>
20 #include <linux/mm.h>
21 #include <linux/init.h>
22
23 #include <asm/delay.h>
24 #include <asm/system.h>
25 #include <asm/ptrace.h>
26 #include <asm/oplib.h>
27 #include <asm/page.h>
28 #include <asm/pgtable.h>
29 #include <asm/unistd.h>
30 #include <asm/uaccess.h>
31 #include <asm/fpumacro.h>
32 #include <asm/lsu.h>
33 #include <asm/dcu.h>
34 #include <asm/estate.h>
35 #include <asm/chafsr.h>
36 #include <asm/sfafsr.h>
37 #include <asm/psrcompat.h>
38 #include <asm/processor.h>
39 #include <asm/timer.h>
40 #include <asm/kdebug.h>
41 #ifdef CONFIG_KMOD
42 #include <linux/kmod.h>
43 #endif
44
45 struct notifier_block *sparc64die_chain;
46 static DEFINE_SPINLOCK(die_notifier_lock);
47
48 int register_die_notifier(struct notifier_block *nb)
49 {
50 int err = 0;
51 unsigned long flags;
52 spin_lock_irqsave(&die_notifier_lock, flags);
53 err = notifier_chain_register(&sparc64die_chain, nb);
54 spin_unlock_irqrestore(&die_notifier_lock, flags);
55 return err;
56 }
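/* A minimal sketch of a die-chain consumer, assuming the conventional
 * struct die_args layout from <asm/kdebug.h> (regs/str/err/trapnr/signr).
 * All names here are illustrative and not part of this file.
 */
#if 0
static int example_die_handler(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val == DIE_TRAP)
		printk(KERN_INFO "example: trap %x (%s)\n",
		       args->trapnr, args->str);
	return NOTIFY_DONE;	/* let the rest of the chain run */
}

static struct notifier_block example_die_nb = {
	.notifier_call	= example_die_handler,
};

/* register_die_notifier(&example_die_nb); */
#endif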
57
58 /* When an irrecoverable trap occurs at tl > 0, the trap entry
59 * code logs the trap state registers at every level in the trap
60 * stack. It is found at (pt_regs + sizeof(pt_regs)) and the layout
61 * is as follows:
62 */
63 struct tl1_traplog {
64 struct {
65 unsigned long tstate;
66 unsigned long tpc;
67 unsigned long tnpc;
68 unsigned long tt;
69 } trapstack[4];
70 unsigned long tl;
71 };
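/* Since the log sits immediately after the pt_regs area, the handlers
 * below reach it with (regs + 1).  A sketch of that idiom (the helper
 * name is illustrative):
 */
#if 0
static struct tl1_traplog *example_traplog(struct pt_regs *regs)
{
	/* Only valid for traps that entered at TL > 0. */
	return (struct tl1_traplog *)(regs + 1);
}
#endif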
72
73 static void dump_tl1_traplog(struct tl1_traplog *p)
74 {
75 int i;
76
77 printk("TRAPLOG: Error at trap level 0x%lx, dumping track stack.\n",
78 p->tl);
79 for (i = 0; i < 4; i++) {
80 printk(KERN_CRIT
81 "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
82 "TNPC[%016lx] TT[%lx]\n",
83 i + 1,
84 p->trapstack[i].tstate, p->trapstack[i].tpc,
85 p->trapstack[i].tnpc, p->trapstack[i].tt);
86 }
87 }
88
89 void do_call_debug(struct pt_regs *regs)
90 {
91 notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT);
92 }
93
94 void bad_trap(struct pt_regs *regs, long lvl)
95 {
96 char buffer[32];
97 siginfo_t info;
98
99 if (notify_die(DIE_TRAP, "bad trap", regs,
100 0, lvl, SIGTRAP) == NOTIFY_STOP)
101 return;
102
103 if (lvl < 0x100) {
104 sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
105 die_if_kernel(buffer, regs);
106 }
107
108 lvl -= 0x100;
109 if (regs->tstate & TSTATE_PRIV) {
110 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
111 die_if_kernel(buffer, regs);
112 }
113 if (test_thread_flag(TIF_32BIT)) {
114 regs->tpc &= 0xffffffff;
115 regs->tnpc &= 0xffffffff;
116 }
117 info.si_signo = SIGILL;
118 info.si_errno = 0;
119 info.si_code = ILL_ILLTRP;
120 info.si_addr = (void __user *)regs->tpc;
121 info.si_trapno = lvl;
122 force_sig_info(SIGILL, &info, current);
123 }
124
125 void bad_trap_tl1(struct pt_regs *regs, long lvl)
126 {
127 char buffer[32];
128
129 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
130 0, lvl, SIGTRAP) == NOTIFY_STOP)
131 return;
132
133 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
134
135 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
136 die_if_kernel (buffer, regs);
137 }
138
139 #ifdef CONFIG_DEBUG_BUGVERBOSE
140 void do_BUG(const char *file, int line)
141 {
142 bust_spinlocks(1);
143 printk("kernel BUG at %s:%d!\n", file, line);
144 }
145 #endif
146
147 void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
148 {
149 siginfo_t info;
150
151 if (notify_die(DIE_TRAP, "instruction access exception", regs,
152 0, 0x8, SIGTRAP) == NOTIFY_STOP)
153 return;
154
155 if (regs->tstate & TSTATE_PRIV) {
156 printk("spitfire_insn_access_exception: SFSR[%016lx] "
157 "SFAR[%016lx], going.\n", sfsr, sfar);
158 die_if_kernel("Iax", regs);
159 }
160 if (test_thread_flag(TIF_32BIT)) {
161 regs->tpc &= 0xffffffff;
162 regs->tnpc &= 0xffffffff;
163 }
164 info.si_signo = SIGSEGV;
165 info.si_errno = 0;
166 info.si_code = SEGV_MAPERR;
167 info.si_addr = (void __user *)regs->tpc;
168 info.si_trapno = 0;
169 force_sig_info(SIGSEGV, &info, current);
170 }
171
172 void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
173 {
174 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
175 0, 0x8, SIGTRAP) == NOTIFY_STOP)
176 return;
177
178 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
179 spitfire_insn_access_exception(regs, sfsr, sfar);
180 }
181
182 void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
183 {
184 siginfo_t info;
185
186 if (notify_die(DIE_TRAP, "data access exception", regs,
187 0, 0x30, SIGTRAP) == NOTIFY_STOP)
188 return;
189
190 if (regs->tstate & TSTATE_PRIV) {
191 /* Test if this comes from uaccess places. */
192 const struct exception_table_entry *entry;
193
194 entry = search_exception_tables(regs->tpc);
195 if (entry) {
196 /* Ouch, somebody is trying VM hole tricks on us... */
197 #ifdef DEBUG_EXCEPTIONS
198 printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
199 printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
200 regs->tpc, entry->fixup);
201 #endif
202 regs->tpc = entry->fixup;
203 regs->tnpc = regs->tpc + 4;
204 return;
205 }
206 /* Shit... */
207 printk("spitfire_data_access_exception: SFSR[%016lx] "
208 "SFAR[%016lx], going.\n", sfsr, sfar);
209 die_if_kernel("Dax", regs);
210 }
211
212 info.si_signo = SIGSEGV;
213 info.si_errno = 0;
214 info.si_code = SEGV_MAPERR;
215 info.si_addr = (void __user *)sfar;
216 info.si_trapno = 0;
217 force_sig_info(SIGSEGV, &info, current);
218 }
219
220 void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
221 {
222 if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
223 0, 0x30, SIGTRAP) == NOTIFY_STOP)
224 return;
225
226 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
227 spitfire_data_access_exception(regs, sfsr, sfar);
228 }
229
230 #ifdef CONFIG_PCI
231 /* This is really pathetic... */
232 extern volatile int pci_poke_in_progress;
233 extern volatile int pci_poke_cpu;
234 extern volatile int pci_poke_faulted;
235 #endif
236
237 /* When access exceptions happen, we must do this. */
238 static void spitfire_clean_and_reenable_l1_caches(void)
239 {
240 unsigned long va;
241
242 if (tlb_type != spitfire)
243 BUG();
244
245 /* Clean 'em. */
246 for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
247 spitfire_put_icache_tag(va, 0x0);
248 spitfire_put_dcache_tag(va, 0x0);
249 }
250
251 /* Re-enable in LSU. */
252 __asm__ __volatile__("flush %%g6\n\t"
253 "membar #Sync\n\t"
254 "stxa %0, [%%g0] %1\n\t"
255 "membar #Sync"
256 : /* no outputs */
257 : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
258 LSU_CONTROL_IM | LSU_CONTROL_DM),
259 "i" (ASI_LSU_CONTROL)
260 : "memory");
261 }
262
263 static void spitfire_enable_estate_errors(void)
264 {
265 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
266 "membar #Sync"
267 : /* no outputs */
268 : "r" (ESTATE_ERR_ALL),
269 "i" (ASI_ESTATE_ERROR_EN));
270 }
271
272 static char ecc_syndrome_table[] = {
273 0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
274 0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
275 0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
276 0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
277 0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
278 0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
279 0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
280 0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
281 0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
282 0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
283 0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
284 0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
285 0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
286 0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
287 0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
288 0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
289 0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
290 0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
291 0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
292 0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
293 0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
294 0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
295 0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
296 0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
297 0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
298 0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
299 0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
300 0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
301 0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
302 0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
303 0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
304 0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
305 };
306
307 static char *syndrome_unknown = "<Unknown>";
308
309 static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
310 {
311 unsigned short scode;
312 char memmod_str[64], *p;
313
314 if (udbl & bit) {
315 scode = ecc_syndrome_table[udbl & 0xff];
316 if (prom_getunumber(scode, afar,
317 memmod_str, sizeof(memmod_str)) == -1)
318 p = syndrome_unknown;
319 else
320 p = memmod_str;
321 printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
322 "Memory Module \"%s\"\n",
323 smp_processor_id(), scode, p);
324 }
325
326 if (udbh & bit) {
327 scode = ecc_syndrome_table[udbh & 0xff];
328 if (prom_getunumber(scode, afar,
329 memmod_str, sizeof(memmod_str)) == -1)
330 p = syndrome_unknown;
331 else
332 p = memmod_str;
333 printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
334 "Memory Module \"%s\"\n",
335 smp_processor_id(), scode, p);
336 }
337
338 }
339
340 static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
341 {
342
343 printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
344 "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
345 smp_processor_id(), afsr, afar, udbl, udbh, tl1);
346
347 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);
348
349 /* We always log it, even if someone is listening for this
350 * trap.
351 */
352 notify_die(DIE_TRAP, "Correctable ECC Error", regs,
353 0, TRAP_TYPE_CEE, SIGTRAP);
354
355 /* The Correctable ECC Error trap does not disable I/D caches. So
356 * we only have to restore the ESTATE Error Enable register.
357 */
358 spitfire_enable_estate_errors();
359 }
360
361 static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
362 {
363 siginfo_t info;
364
365 printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
366 "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n",
367 smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
368
369 /* XXX add more human friendly logging of the error status
370 * XXX as is implemented for cheetah
371 */
372
373 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
374
375 /* We always log it, even if someone is listening for this
376 * trap.
377 */
378 notify_die(DIE_TRAP, "Uncorrectable Error", regs,
379 0, tt, SIGTRAP);
380
381 if (regs->tstate & TSTATE_PRIV) {
382 if (tl1)
383 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
384 die_if_kernel("UE", regs);
385 }
386
387 /* XXX need more intelligent processing here, such as is implemented
388 * XXX for cheetah errors, in fact if the E-cache still holds the
389 * XXX line with bad parity this will loop
390 */
391
392 spitfire_clean_and_reenable_l1_caches();
393 spitfire_enable_estate_errors();
394
395 if (test_thread_flag(TIF_32BIT)) {
396 regs->tpc &= 0xffffffff;
397 regs->tnpc &= 0xffffffff;
398 }
399 info.si_signo = SIGBUS;
400 info.si_errno = 0;
401 info.si_code = BUS_OBJERR;
402 info.si_addr = (void *)0;
403 info.si_trapno = 0;
404 force_sig_info(SIGBUS, &info, current);
405 }
406
407 void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
408 {
409 unsigned long afsr, tt, udbh, udbl;
410 int tl1;
411
412 afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
413 tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
414 tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
415 udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
416 udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;
417
418 #ifdef CONFIG_PCI
419 if (tt == TRAP_TYPE_DAE &&
420 pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
421 spitfire_clean_and_reenable_l1_caches();
422 spitfire_enable_estate_errors();
423
424 pci_poke_faulted = 1;
425 regs->tnpc = regs->tpc + 4;
426 return;
427 }
428 #endif
429
430 if (afsr & SFAFSR_UE)
431 spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);
432
433 if (tt == TRAP_TYPE_CEE) {
434 /* Handle the case where we took a CEE trap, but ACK'd
435 * only the UE state in the UDB error registers.
436 */
437 if (afsr & SFAFSR_UE) {
438 if (udbh & UDBE_CE) {
439 __asm__ __volatile__(
440 "stxa %0, [%1] %2\n\t"
441 "membar #Sync"
442 : /* no outputs */
443 : "r" (udbh & UDBE_CE),
444 "r" (0x0), "i" (ASI_UDB_ERROR_W));
445 }
446 if (udbl & UDBE_CE) {
447 __asm__ __volatile__(
448 "stxa %0, [%1] %2\n\t"
449 "membar #Sync"
450 : /* no outputs */
451 : "r" (udbl & UDBE_CE),
452 "r" (0x18), "i" (ASI_UDB_ERROR_W));
453 }
454 }
455
456 spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
457 }
458 }
459
460 int cheetah_pcache_forced_on;
461
462 void cheetah_enable_pcache(void)
463 {
464 unsigned long dcr;
465
466 printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
467 smp_processor_id());
468
469 __asm__ __volatile__("ldxa [%%g0] %1, %0"
470 : "=r" (dcr)
471 : "i" (ASI_DCU_CONTROL_REG));
472 dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
473 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
474 "membar #Sync"
475 : /* no outputs */
476 : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
477 }
478
479 /* Cheetah error trap handling. */
480 static unsigned long ecache_flush_physbase;
481 static unsigned long ecache_flush_linesize;
482 static unsigned long ecache_flush_size;
483
484 /* WARNING: The error trap handlers in assembly know the precise
485 * layout of the following structure.
486 *
487 * C-level handlers below use this information to log the error
488 * and then determine how to recover (if possible).
489 */
490 struct cheetah_err_info {
491 /*0x00*/u64 afsr;
492 /*0x08*/u64 afar;
493
494 /* D-cache state */
495 /*0x10*/u64 dcache_data[4]; /* The actual data */
496 /*0x30*/u64 dcache_index; /* D-cache index */
497 /*0x38*/u64 dcache_tag; /* D-cache tag/valid */
498 /*0x40*/u64 dcache_utag; /* D-cache microtag */
499 /*0x48*/u64 dcache_stag; /* D-cache snooptag */
500
501 /* I-cache state */
502 /*0x50*/u64 icache_data[8]; /* The actual insns + predecode */
503 /*0x90*/u64 icache_index; /* I-cache index */
504 /*0x98*/u64 icache_tag; /* I-cache phys tag */
505 /*0xa0*/u64 icache_utag; /* I-cache microtag */
506 /*0xa8*/u64 icache_stag; /* I-cache snooptag */
507 /*0xb0*/u64 icache_upper; /* I-cache upper-tag */
508 /*0xb8*/u64 icache_lower; /* I-cache lower-tag */
509
510 /* E-cache state */
511 /*0xc0*/u64 ecache_data[4]; /* 32 bytes from staging registers */
512 /*0xe0*/u64 ecache_index; /* E-cache index */
513 /*0xe8*/u64 ecache_tag; /* E-cache tag/state */
514
515 /*0xf0*/u64 __pad[32 - 30];
516 };
517 #define CHAFSR_INVALID ((u64)-1L)
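/* Per the offset annotations above, the structure is exactly 32 u64
 * words (0x100 bytes) once __pad is included.  A compile-time check
 * of that assumption could use the classic negative-array trick:
 */
#if 0
static char __cheetah_err_info_layout_check[
	(sizeof(struct cheetah_err_info) == 32 * sizeof(u64)) ? 1 : -1];
#endif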
518
519 /* This table is ordered in priority of errors and matches the
520 * AFAR overwrite policy as well.
521 */
522
523 struct afsr_error_table {
524 unsigned long mask;
525 const char *name;
526 };
527
528 static const char CHAFSR_PERR_msg[] =
529 "System interface protocol error";
530 static const char CHAFSR_IERR_msg[] =
531 "Internal processor error";
532 static const char CHAFSR_ISAP_msg[] =
533 "System request parity error on incoming addresss";
534 static const char CHAFSR_UCU_msg[] =
535 "Uncorrectable E-cache ECC error for ifetch/data";
536 static const char CHAFSR_UCC_msg[] =
537 "SW Correctable E-cache ECC error for ifetch/data";
538 static const char CHAFSR_UE_msg[] =
539 "Uncorrectable system bus data ECC error for read";
540 static const char CHAFSR_EDU_msg[] =
541 "Uncorrectable E-cache ECC error for stmerge/blkld";
542 static const char CHAFSR_EMU_msg[] =
543 "Uncorrectable system bus MTAG error";
544 static const char CHAFSR_WDU_msg[] =
545 "Uncorrectable E-cache ECC error for writeback";
546 static const char CHAFSR_CPU_msg[] =
547 "Uncorrectable ECC error for copyout";
548 static const char CHAFSR_CE_msg[] =
549 "HW corrected system bus data ECC error for read";
550 static const char CHAFSR_EDC_msg[] =
551 "HW corrected E-cache ECC error for stmerge/blkld";
552 static const char CHAFSR_EMC_msg[] =
553 "HW corrected system bus MTAG ECC error";
554 static const char CHAFSR_WDC_msg[] =
555 "HW corrected E-cache ECC error for writeback";
556 static const char CHAFSR_CPC_msg[] =
557 "HW corrected ECC error for copyout";
558 static const char CHAFSR_TO_msg[] =
559 "Unmapped error from system bus";
560 static const char CHAFSR_BERR_msg[] =
561 "Bus error response from system bus";
562 static const char CHAFSR_IVC_msg[] =
563 "HW corrected system bus data ECC error for ivec read";
564 static const char CHAFSR_IVU_msg[] =
565 "Uncorrectable system bus data ECC error for ivec read";
566 static struct afsr_error_table __cheetah_error_table[] = {
567 { CHAFSR_PERR, CHAFSR_PERR_msg },
568 { CHAFSR_IERR, CHAFSR_IERR_msg },
569 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
570 { CHAFSR_UCU, CHAFSR_UCU_msg },
571 { CHAFSR_UCC, CHAFSR_UCC_msg },
572 { CHAFSR_UE, CHAFSR_UE_msg },
573 { CHAFSR_EDU, CHAFSR_EDU_msg },
574 { CHAFSR_EMU, CHAFSR_EMU_msg },
575 { CHAFSR_WDU, CHAFSR_WDU_msg },
576 { CHAFSR_CPU, CHAFSR_CPU_msg },
577 { CHAFSR_CE, CHAFSR_CE_msg },
578 { CHAFSR_EDC, CHAFSR_EDC_msg },
579 { CHAFSR_EMC, CHAFSR_EMC_msg },
580 { CHAFSR_WDC, CHAFSR_WDC_msg },
581 { CHAFSR_CPC, CHAFSR_CPC_msg },
582 { CHAFSR_TO, CHAFSR_TO_msg },
583 { CHAFSR_BERR, CHAFSR_BERR_msg },
584 /* These two do not update the AFAR. */
585 { CHAFSR_IVC, CHAFSR_IVC_msg },
586 { CHAFSR_IVU, CHAFSR_IVU_msg },
587 { 0, NULL },
588 };
589 static const char CHPAFSR_DTO_msg[] =
590 "System bus unmapped error for prefetch/storequeue-read";
591 static const char CHPAFSR_DBERR_msg[] =
592 "System bus error for prefetch/storequeue-read";
593 static const char CHPAFSR_THCE_msg[] =
594 "Hardware corrected E-cache Tag ECC error";
595 static const char CHPAFSR_TSCE_msg[] =
596 "SW handled correctable E-cache Tag ECC error";
597 static const char CHPAFSR_TUE_msg[] =
598 "Uncorrectable E-cache Tag ECC error";
599 static const char CHPAFSR_DUE_msg[] =
600 "System bus uncorrectable data ECC error due to prefetch/store-fill";
601 static struct afsr_error_table __cheetah_plus_error_table[] = {
602 { CHAFSR_PERR, CHAFSR_PERR_msg },
603 { CHAFSR_IERR, CHAFSR_IERR_msg },
604 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
605 { CHAFSR_UCU, CHAFSR_UCU_msg },
606 { CHAFSR_UCC, CHAFSR_UCC_msg },
607 { CHAFSR_UE, CHAFSR_UE_msg },
608 { CHAFSR_EDU, CHAFSR_EDU_msg },
609 { CHAFSR_EMU, CHAFSR_EMU_msg },
610 { CHAFSR_WDU, CHAFSR_WDU_msg },
611 { CHAFSR_CPU, CHAFSR_CPU_msg },
612 { CHAFSR_CE, CHAFSR_CE_msg },
613 { CHAFSR_EDC, CHAFSR_EDC_msg },
614 { CHAFSR_EMC, CHAFSR_EMC_msg },
615 { CHAFSR_WDC, CHAFSR_WDC_msg },
616 { CHAFSR_CPC, CHAFSR_CPC_msg },
617 { CHAFSR_TO, CHAFSR_TO_msg },
618 { CHAFSR_BERR, CHAFSR_BERR_msg },
619 { CHPAFSR_DTO, CHPAFSR_DTO_msg },
620 { CHPAFSR_DBERR, CHPAFSR_DBERR_msg },
621 { CHPAFSR_THCE, CHPAFSR_THCE_msg },
622 { CHPAFSR_TSCE, CHPAFSR_TSCE_msg },
623 { CHPAFSR_TUE, CHPAFSR_TUE_msg },
624 { CHPAFSR_DUE, CHPAFSR_DUE_msg },
625 /* These two do not update the AFAR. */
626 { CHAFSR_IVC, CHAFSR_IVC_msg },
627 { CHAFSR_IVU, CHAFSR_IVU_msg },
628 { 0, NULL },
629 };
630 static const char JPAFSR_JETO_msg[] =
631 "System interface protocol error, hw timeout caused";
632 static const char JPAFSR_SCE_msg[] =
633 "Parity error on system snoop results";
634 static const char JPAFSR_JEIC_msg[] =
635 "System interface protocol error, illegal command detected";
636 static const char JPAFSR_JEIT_msg[] =
637 "System interface protocol error, illegal ADTYPE detected";
638 static const char JPAFSR_OM_msg[] =
639 "Out of range memory error has occurred";
640 static const char JPAFSR_ETP_msg[] =
641 "Parity error on L2 cache tag SRAM";
642 static const char JPAFSR_UMS_msg[] =
643 "Error due to unsupported store";
644 static const char JPAFSR_RUE_msg[] =
645 "Uncorrectable ECC error from remote cache/memory";
646 static const char JPAFSR_RCE_msg[] =
647 "Correctable ECC error from remote cache/memory";
648 static const char JPAFSR_BP_msg[] =
649 "JBUS parity error on returned read data";
650 static const char JPAFSR_WBP_msg[] =
651 "JBUS parity error on data for writeback or block store";
652 static const char JPAFSR_FRC_msg[] =
653 "Foreign read to DRAM incurring correctable ECC error";
654 static const char JPAFSR_FRU_msg[] =
655 "Foreign read to DRAM incurring uncorrectable ECC error";
656 static struct afsr_error_table __jalapeno_error_table[] = {
657 { JPAFSR_JETO, JPAFSR_JETO_msg },
658 { JPAFSR_SCE, JPAFSR_SCE_msg },
659 { JPAFSR_JEIC, JPAFSR_JEIC_msg },
660 { JPAFSR_JEIT, JPAFSR_JEIT_msg },
661 { CHAFSR_PERR, CHAFSR_PERR_msg },
662 { CHAFSR_IERR, CHAFSR_IERR_msg },
663 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
664 { CHAFSR_UCU, CHAFSR_UCU_msg },
665 { CHAFSR_UCC, CHAFSR_UCC_msg },
666 { CHAFSR_UE, CHAFSR_UE_msg },
667 { CHAFSR_EDU, CHAFSR_EDU_msg },
668 { JPAFSR_OM, JPAFSR_OM_msg },
669 { CHAFSR_WDU, CHAFSR_WDU_msg },
670 { CHAFSR_CPU, CHAFSR_CPU_msg },
671 { CHAFSR_CE, CHAFSR_CE_msg },
672 { CHAFSR_EDC, CHAFSR_EDC_msg },
673 { JPAFSR_ETP, JPAFSR_ETP_msg },
674 { CHAFSR_WDC, CHAFSR_WDC_msg },
675 { CHAFSR_CPC, CHAFSR_CPC_msg },
676 { CHAFSR_TO, CHAFSR_TO_msg },
677 { CHAFSR_BERR, CHAFSR_BERR_msg },
678 { JPAFSR_UMS, JPAFSR_UMS_msg },
679 { JPAFSR_RUE, JPAFSR_RUE_msg },
680 { JPAFSR_RCE, JPAFSR_RCE_msg },
681 { JPAFSR_BP, JPAFSR_BP_msg },
682 { JPAFSR_WBP, JPAFSR_WBP_msg },
683 { JPAFSR_FRC, JPAFSR_FRC_msg },
684 { JPAFSR_FRU, JPAFSR_FRU_msg },
685 /* These two do not update the AFAR. */
686 { CHAFSR_IVU, CHAFSR_IVU_msg },
687 { 0, NULL },
688 };
689 static struct afsr_error_table *cheetah_error_table;
690 static unsigned long cheetah_afsr_errors;
691
692 /* This is allocated at boot time based upon the largest hardware
693 * cpu ID in the system. We allocate two entries per cpu, one for
694 * TL==0 logging and one for TL >= 1 logging.
695 */
696 struct cheetah_err_info *cheetah_error_log;
697
698 static __inline__ struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
699 {
700 struct cheetah_err_info *p;
701 int cpu = smp_processor_id();
702
703 if (!cheetah_error_log)
704 return NULL;
705
706 p = cheetah_error_log + (cpu * 2);
707 if ((afsr & CHAFSR_TL1) != 0UL)
708 p++;
709
710 return p;
711 }
712
713 extern unsigned int tl0_icpe[], tl1_icpe[];
714 extern unsigned int tl0_dcpe[], tl1_dcpe[];
715 extern unsigned int tl0_fecc[], tl1_fecc[];
716 extern unsigned int tl0_cee[], tl1_cee[];
717 extern unsigned int tl0_iae[], tl1_iae[];
718 extern unsigned int tl0_dae[], tl1_dae[];
719 extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
720 extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
721 extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
722 extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
723 extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
724
725 void __init cheetah_ecache_flush_init(void)
726 {
727 unsigned long largest_size, smallest_linesize, order, ver;
728 int node, i, instance;
729
730 /* Scan all cpu device tree nodes, note two values:
731 * 1) largest E-cache size
732 * 2) smallest E-cache line size
733 */
734 largest_size = 0UL;
735 smallest_linesize = ~0UL;
736
737 instance = 0;
738 while (!cpu_find_by_instance(instance, &node, NULL)) {
739 unsigned long val;
740
741 val = prom_getintdefault(node, "ecache-size",
742 (2 * 1024 * 1024));
743 if (val > largest_size)
744 largest_size = val;
745 val = prom_getintdefault(node, "ecache-line-size", 64);
746 if (val < smallest_linesize)
747 smallest_linesize = val;
748 instance++;
749 }
750
751 if (largest_size == 0UL || smallest_linesize == ~0UL) {
752 prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
753 "parameters.\n");
754 prom_halt();
755 }
756
757 ecache_flush_size = (2 * largest_size);
758 ecache_flush_linesize = smallest_linesize;
759
760 /* Discover a physically contiguous chunk of physical
761 * memory in 'sp_banks' of size ecache_flush_size calculated
762 * above. Store the physical base of this area at
763 * ecache_flush_physbase.
764 */
765 for (node = 0; ; node++) {
766 if (sp_banks[node].num_bytes == 0)
767 break;
768 if (sp_banks[node].num_bytes >= ecache_flush_size) {
769 ecache_flush_physbase = sp_banks[node].base_addr;
770 break;
771 }
772 }
773
774 /* Note: Zero would be a valid value of ecache_flush_physbase so
775 * don't use that as the success test. :-)
776 */
777 if (sp_banks[node].num_bytes == 0) {
778 		prom_printf("cheetah_ecache_flush_init: Cannot find %ld byte "
779 			    "contiguous physical memory.\n", ecache_flush_size);
780 prom_halt();
781 }
782
783 /* Now allocate error trap reporting scoreboard. */
784 node = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
785 for (order = 0; order < MAX_ORDER; order++) {
786 if ((PAGE_SIZE << order) >= node)
787 break;
788 }
789 cheetah_error_log = (struct cheetah_err_info *)
790 __get_free_pages(GFP_KERNEL, order);
791 if (!cheetah_error_log) {
792 prom_printf("cheetah_ecache_flush_init: Failed to allocate "
793 "error logging scoreboard (%d bytes).\n", node);
794 prom_halt();
795 }
796 memset(cheetah_error_log, 0, PAGE_SIZE << order);
797
798 /* Mark all AFSRs as invalid so that the trap handler will
799  * log new information there.
800 */
801 for (i = 0; i < 2 * NR_CPUS; i++)
802 cheetah_error_log[i].afsr = CHAFSR_INVALID;
803
804 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
805 if ((ver >> 32) == 0x003e0016) {
806 cheetah_error_table = &__jalapeno_error_table[0];
807 cheetah_afsr_errors = JPAFSR_ERRORS;
808 } else if ((ver >> 32) == 0x003e0015) {
809 cheetah_error_table = &__cheetah_plus_error_table[0];
810 cheetah_afsr_errors = CHPAFSR_ERRORS;
811 } else {
812 cheetah_error_table = &__cheetah_error_table[0];
813 cheetah_afsr_errors = CHAFSR_ERRORS;
814 }
815
816 /* Now patch trap tables. */
817 memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
818 memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
819 memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
820 memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
821 memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
822 memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
823 memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
824 memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
825 if (tlb_type == cheetah_plus) {
826 memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
827 memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
828 memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
829 memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
830 }
831 flushi(PAGE_OFFSET);
832 }
833
834 static void cheetah_flush_ecache(void)
835 {
836 unsigned long flush_base = ecache_flush_physbase;
837 unsigned long flush_linesize = ecache_flush_linesize;
838 unsigned long flush_size = ecache_flush_size;
839
840 __asm__ __volatile__("1: subcc %0, %4, %0\n\t"
841 " bne,pt %%xcc, 1b\n\t"
842 " ldxa [%2 + %0] %3, %%g0\n\t"
843 : "=&r" (flush_size)
844 : "0" (flush_size), "r" (flush_base),
845 "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
846 }
847
848 static void cheetah_flush_ecache_line(unsigned long physaddr)
849 {
850 unsigned long alias;
851
852 physaddr &= ~(8UL - 1UL);
853 physaddr = (ecache_flush_physbase +
854 (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
855 alias = physaddr + (ecache_flush_size >> 1UL);
856 __asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
857 "ldxa [%1] %2, %%g0\n\t"
858 "membar #Sync"
859 : /* no outputs */
860 : "r" (physaddr), "r" (alias),
861 "i" (ASI_PHYS_USE_EC));
862 }
863
864 /* Unfortunately, the diagnostic access to the I-cache tags we need to
865 * use to clear the thing interferes with I-cache coherency transactions.
866 *
867 * So we must only flush the I-cache when it is disabled.
868 */
869 static void __cheetah_flush_icache(void)
870 {
871 unsigned int icache_size, icache_line_size;
872 unsigned long addr;
873
874 icache_size = local_cpu_data().icache_size;
875 icache_line_size = local_cpu_data().icache_line_size;
876
877 /* Clear the valid bits in all the tags. */
878 for (addr = 0; addr < icache_size; addr += icache_line_size) {
879 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
880 "membar #Sync"
881 : /* no outputs */
882 : "r" (addr | (2 << 3)),
883 "i" (ASI_IC_TAG));
884 }
885 }
886
887 static void cheetah_flush_icache(void)
888 {
889 unsigned long dcu_save;
890
891 /* Save current DCU, disable I-cache. */
892 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
893 "or %0, %2, %%g1\n\t"
894 "stxa %%g1, [%%g0] %1\n\t"
895 "membar #Sync"
896 : "=r" (dcu_save)
897 : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
898 : "g1");
899
900 __cheetah_flush_icache();
901
902 /* Restore DCU register */
903 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
904 "membar #Sync"
905 : /* no outputs */
906 : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
907 }
908
909 static void cheetah_flush_dcache(void)
910 {
911 unsigned int dcache_size, dcache_line_size;
912 unsigned long addr;
913
914 dcache_size = local_cpu_data().dcache_size;
915 dcache_line_size = local_cpu_data().dcache_line_size;
916
917 for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
918 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
919 "membar #Sync"
920 : /* no outputs */
921 : "r" (addr), "i" (ASI_DCACHE_TAG));
922 }
923 }
924
925 /* In order to make the even parity correct we must do two things.
926 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
927 * Next, we clear out all 32-bytes of data for that line. Data of
928 * all-zero + tag parity value of zero == correct parity.
929 */
930 static void cheetah_plus_zap_dcache_parity(void)
931 {
932 unsigned int dcache_size, dcache_line_size;
933 unsigned long addr;
934
935 dcache_size = local_cpu_data().dcache_size;
936 dcache_line_size = local_cpu_data().dcache_line_size;
937
938 for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
939 unsigned long tag = (addr >> 14);
940 unsigned long line;
941
942 __asm__ __volatile__("membar #Sync\n\t"
943 "stxa %0, [%1] %2\n\t"
944 "membar #Sync"
945 : /* no outputs */
946 : "r" (tag), "r" (addr),
947 "i" (ASI_DCACHE_UTAG));
948 for (line = addr; line < addr + dcache_line_size; line += 8)
949 __asm__ __volatile__("membar #Sync\n\t"
950 "stxa %%g0, [%0] %1\n\t"
951 "membar #Sync"
952 : /* no outputs */
953 : "r" (line),
954 "i" (ASI_DCACHE_DATA));
955 }
956 }
957
958 /* Conversion tables used to frob Cheetah AFSR syndrome values into
959 * something palatable to the memory controller driver get_unumber
960 * routine.
961 */
962 #define MT0 137
963 #define MT1 138
964 #define MT2 139
965 #define NONE 254
966 #define MTC0 140
967 #define MTC1 141
968 #define MTC2 142
969 #define MTC3 143
970 #define C0 128
971 #define C1 129
972 #define C2 130
973 #define C3 131
974 #define C4 132
975 #define C5 133
976 #define C6 134
977 #define C7 135
978 #define C8 136
979 #define M2 144
980 #define M3 145
981 #define M4 146
982 #define M 147
983 static unsigned char cheetah_ecc_syntab[] = {
984 /*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
985 /*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
986 /*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
987 /*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
988 /*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
989 /*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
990 /*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
991 /*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
992 /*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
993 /*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
994 /*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
995 /*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
996 /*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
997 /*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
998 /*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
999 /*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
1000 /*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
1001 /*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
1002 /*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
1003 /*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
1004 /*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
1005 /*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
1006 /*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
1007 /*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
1008 /*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
1009 /*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
1010 /*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
1011 /*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
1012 /*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
1013 /*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
1014 /*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
1015 /*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
1016 };
1017 static unsigned char cheetah_mtag_syntab[] = {
1018 NONE, MTC0,
1019 MTC1, NONE,
1020 MTC2, NONE,
1021 NONE, MT0,
1022 MTC3, NONE,
1023 NONE, MT1,
1024 NONE, MT2,
1025 NONE, NONE
1026 };
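/* Tying the tables to the unumber lookup: the syndrome field is pulled
 * out of the AFSR, frobbed through the table, then handed to the memory
 * controller driver, as cheetah_log_errors() does below.  A condensed
 * sketch (the function name is illustrative):
 */
#if 0
static void example_log_esynd(unsigned long afsr, unsigned long afar)
{
	char unum[256];
	int syn = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;

	syn = cheetah_ecc_syntab[syn];
	if (chmc_getunumber(syn, afar, unum, sizeof(unum)) != -1)
		printk(KERN_WARNING "Memory Module \"%s\"\n", unum);
}
#endif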
1027
1028 /* Return the highest priority error condition mentioned. */
1029 static __inline__ unsigned long cheetah_get_hipri(unsigned long afsr)
1030 {
1031 unsigned long tmp = 0;
1032 int i;
1033
1034 for (i = 0; cheetah_error_table[i].mask; i++) {
1035 if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
1036 return tmp;
1037 }
1038 return tmp;
1039 }
1040
1041 static const char *cheetah_get_string(unsigned long bit)
1042 {
1043 int i;
1044
1045 for (i = 0; cheetah_error_table[i].mask; i++) {
1046 if ((bit & cheetah_error_table[i].mask) != 0UL)
1047 return cheetah_error_table[i].name;
1048 }
1049 return "???";
1050 }
1051
1052 extern int chmc_getunumber(int, unsigned long, char *, int);
1053
1054 static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
1055 unsigned long afsr, unsigned long afar, int recoverable)
1056 {
1057 unsigned long hipri;
1058 char unum[256];
1059
1060 printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
1061 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1062 afsr, afar,
1063 (afsr & CHAFSR_TL1) ? 1 : 0);
1064 printk("%s" "ERROR(%d): TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1065 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1066 regs->tpc, regs->tnpc, regs->tstate);
1067 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
1068 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1069 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
1070 (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
1071 (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
1072 (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
1073 hipri = cheetah_get_hipri(afsr);
1074 printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
1075 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1076 hipri, cheetah_get_string(hipri));
1077
1078 /* Try to get unumber if relevant. */
1079 #define ESYND_ERRORS (CHAFSR_IVC | CHAFSR_IVU | \
1080 CHAFSR_CPC | CHAFSR_CPU | \
1081 CHAFSR_UE | CHAFSR_CE | \
1082 CHAFSR_EDC | CHAFSR_EDU | \
1083 CHAFSR_UCC | CHAFSR_UCU | \
1084 CHAFSR_WDU | CHAFSR_WDC)
1085 #define MSYND_ERRORS (CHAFSR_EMC | CHAFSR_EMU)
1086 if (afsr & ESYND_ERRORS) {
1087 int syndrome;
1088 int ret;
1089
1090 syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
1091 syndrome = cheetah_ecc_syntab[syndrome];
1092 ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
1093 if (ret != -1)
1094 printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
1095 (recoverable ? KERN_WARNING : KERN_CRIT),
1096 smp_processor_id(), unum);
1097 } else if (afsr & MSYND_ERRORS) {
1098 int syndrome;
1099 int ret;
1100
1101 syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
1102 syndrome = cheetah_mtag_syntab[syndrome];
1103 ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
1104 if (ret != -1)
1105 printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
1106 (recoverable ? KERN_WARNING : KERN_CRIT),
1107 smp_processor_id(), unum);
1108 }
1109
1110 /* Now dump the cache snapshots. */
1111 printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
1112 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1113 (int) info->dcache_index,
1114 info->dcache_tag,
1115 info->dcache_utag,
1116 info->dcache_stag);
1117 printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
1118 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1119 info->dcache_data[0],
1120 info->dcache_data[1],
1121 info->dcache_data[2],
1122 info->dcache_data[3]);
1123 printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
1124 "u[%016lx] l[%016lx]\n",
1125 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1126 (int) info->icache_index,
1127 info->icache_tag,
1128 info->icache_utag,
1129 info->icache_stag,
1130 info->icache_upper,
1131 info->icache_lower);
1132 printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
1133 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1134 info->icache_data[0],
1135 info->icache_data[1],
1136 info->icache_data[2],
1137 info->icache_data[3]);
1138 printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
1139 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1140 info->icache_data[4],
1141 info->icache_data[5],
1142 info->icache_data[6],
1143 info->icache_data[7]);
1144 printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
1145 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1146 (int) info->ecache_index, info->ecache_tag);
1147 printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
1148 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1149 info->ecache_data[0],
1150 info->ecache_data[1],
1151 info->ecache_data[2],
1152 info->ecache_data[3]);
1153
1154 afsr = (afsr & ~hipri) & cheetah_afsr_errors;
1155 while (afsr != 0UL) {
1156 unsigned long bit = cheetah_get_hipri(afsr);
1157
1158 printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
1159 (recoverable ? KERN_WARNING : KERN_CRIT),
1160 bit, cheetah_get_string(bit));
1161
1162 afsr &= ~bit;
1163 }
1164
1165 if (!recoverable)
1166 printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
1167 }
1168
1169 static int cheetah_recheck_errors(struct cheetah_err_info *logp)
1170 {
1171 unsigned long afsr, afar;
1172 int ret = 0;
1173
1174 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
1175 : "=r" (afsr)
1176 : "i" (ASI_AFSR));
1177 if ((afsr & cheetah_afsr_errors) != 0) {
1178 if (logp != NULL) {
1179 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
1180 : "=r" (afar)
1181 : "i" (ASI_AFAR));
1182 logp->afsr = afsr;
1183 logp->afar = afar;
1184 }
1185 ret = 1;
1186 }
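	/* Writing the accumulated AFSR value back clears the logged
	 * error bits; the AFSR error bits are write-one-to-clear.
	 */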
1187 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
1188 "membar #Sync\n\t"
1189 : : "r" (afsr), "i" (ASI_AFSR));
1190
1191 return ret;
1192 }
1193
1194 void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1195 {
1196 struct cheetah_err_info local_snapshot, *p;
1197 int recoverable;
1198
1199 /* Flush E-cache */
1200 cheetah_flush_ecache();
1201
1202 p = cheetah_get_error_log(afsr);
1203 if (!p) {
1204 prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
1205 afsr, afar);
1206 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1207 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1208 prom_halt();
1209 }
1210
1211 /* Grab snapshot of logged error. */
1212 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1213
1214 /* If the current trap snapshot does not match what the
1215 * trap handler passed along into our args, big trouble.
1216 * In such a case, mark the local copy as invalid.
1217 *
1218 * Else, it matches and we mark the afsr in the non-local
1219 * copy as invalid so we may log new error traps there.
1220 */
1221 if (p->afsr != afsr || p->afar != afar)
1222 local_snapshot.afsr = CHAFSR_INVALID;
1223 else
1224 p->afsr = CHAFSR_INVALID;
1225
1226 cheetah_flush_icache();
1227 cheetah_flush_dcache();
1228
1229 /* Re-enable I-cache/D-cache */
1230 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1231 "or %%g1, %1, %%g1\n\t"
1232 "stxa %%g1, [%%g0] %0\n\t"
1233 "membar #Sync"
1234 : /* no outputs */
1235 : "i" (ASI_DCU_CONTROL_REG),
1236 "i" (DCU_DC | DCU_IC)
1237 : "g1");
1238
1239 /* Re-enable error reporting */
1240 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1241 "or %%g1, %1, %%g1\n\t"
1242 "stxa %%g1, [%%g0] %0\n\t"
1243 "membar #Sync"
1244 : /* no outputs */
1245 : "i" (ASI_ESTATE_ERROR_EN),
1246 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1247 : "g1");
1248
1249 /* Decide if we can continue after handling this trap and
1250 * logging the error.
1251 */
1252 recoverable = 1;
1253 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1254 recoverable = 0;
1255
1256 /* Re-check AFSR/AFAR. What we are looking for here is whether a new
1257 * error was logged while we had error reporting traps disabled.
1258 */
1259 if (cheetah_recheck_errors(&local_snapshot)) {
1260 unsigned long new_afsr = local_snapshot.afsr;
1261
1262 /* If we got a new asynchronous error, die... */
1263 if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1264 CHAFSR_WDU | CHAFSR_CPU |
1265 CHAFSR_IVU | CHAFSR_UE |
1266 CHAFSR_BERR | CHAFSR_TO))
1267 recoverable = 0;
1268 }
1269
1270 /* Log errors. */
1271 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1272
1273 if (!recoverable)
1274 panic("Irrecoverable Fast-ECC error trap.\n");
1275
1276 /* Flush E-cache to kick the error trap handlers out. */
1277 cheetah_flush_ecache();
1278 }
1279
1280 /* Try to fix a correctable error by pushing the line out from
1281 * the E-cache. Recheck error reporting registers to see if the
1282 * problem is intermittent.
1283 */
1284 static int cheetah_fix_ce(unsigned long physaddr)
1285 {
1286 unsigned long orig_estate;
1287 unsigned long alias1, alias2;
1288 int ret;
1289
1290 /* Make sure correctable error traps are disabled. */
1291 __asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
1292 "andn %0, %1, %%g1\n\t"
1293 "stxa %%g1, [%%g0] %2\n\t"
1294 "membar #Sync"
1295 : "=&r" (orig_estate)
1296 : "i" (ESTATE_ERROR_CEEN),
1297 "i" (ASI_ESTATE_ERROR_EN)
1298 : "g1");
1299
1300 /* We calculate alias addresses that will force the
1301 * cache line in question out of the E-cache. Then
1302 * we bring it back in with an atomic instruction so
1303 * that we get it in some modified/exclusive state,
1304 * then we displace it again to try and get proper ECC
1305 * pushed back into the system.
1306 */
1307 physaddr &= ~(8UL - 1UL);
1308 alias1 = (ecache_flush_physbase +
1309 (physaddr & ((ecache_flush_size >> 1) - 1)));
1310 alias2 = alias1 + (ecache_flush_size >> 1);
1311 __asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
1312 "ldxa [%1] %3, %%g0\n\t"
1313 "casxa [%2] %3, %%g0, %%g0\n\t"
1314 "membar #StoreLoad | #StoreStore\n\t"
1315 "ldxa [%0] %3, %%g0\n\t"
1316 "ldxa [%1] %3, %%g0\n\t"
1317 "membar #Sync"
1318 : /* no outputs */
1319 : "r" (alias1), "r" (alias2),
1320 "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1321
1322 /* Did that trigger another error? */
1323 if (cheetah_recheck_errors(NULL)) {
1324 /* Try one more time. */
1325 __asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
1326 "membar #Sync"
1327 : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1328 if (cheetah_recheck_errors(NULL))
1329 ret = 2;
1330 else
1331 ret = 1;
1332 } else {
1333 /* No new error, intermittent problem. */
1334 ret = 0;
1335 }
1336
1337 /* Restore error enables. */
1338 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
1339 "membar #Sync"
1340 : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));
1341
1342 return ret;
1343 }
1344
1345 /* Return non-zero if PADDR is a valid physical memory address. */
1346 static int cheetah_check_main_memory(unsigned long paddr)
1347 {
1348 int i;
1349
1350 for (i = 0; ; i++) {
1351 if (sp_banks[i].num_bytes == 0)
1352 break;
1353 if (paddr >= sp_banks[i].base_addr &&
1354 paddr < (sp_banks[i].base_addr + sp_banks[i].num_bytes))
1355 return 1;
1356 }
1357 return 0;
1358 }
1359
1360 void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1361 {
1362 struct cheetah_err_info local_snapshot, *p;
1363 int recoverable, is_memory;
1364
1365 p = cheetah_get_error_log(afsr);
1366 if (!p) {
1367 prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
1368 afsr, afar);
1369 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1370 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1371 prom_halt();
1372 }
1373
1374 /* Grab snapshot of logged error. */
1375 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1376
1377 /* If the current trap snapshot does not match what the
1378 * trap handler passed along into our args, big trouble.
1379 * In such a case, mark the local copy as invalid.
1380 *
1381 * Else, it matches and we mark the afsr in the non-local
1382 * copy as invalid so we may log new error traps there.
1383 */
1384 if (p->afsr != afsr || p->afar != afar)
1385 local_snapshot.afsr = CHAFSR_INVALID;
1386 else
1387 p->afsr = CHAFSR_INVALID;
1388
1389 is_memory = cheetah_check_main_memory(afar);
1390
1391 if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
1392 /* XXX Might want to log the results of this operation
1393 * XXX somewhere... -DaveM
1394 */
1395 cheetah_fix_ce(afar);
1396 }
1397
1398 {
1399 int flush_all, flush_line;
1400
1401 flush_all = flush_line = 0;
1402 if ((afsr & CHAFSR_EDC) != 0UL) {
1403 if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
1404 flush_line = 1;
1405 else
1406 flush_all = 1;
1407 } else if ((afsr & CHAFSR_CPC) != 0UL) {
1408 if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
1409 flush_line = 1;
1410 else
1411 flush_all = 1;
1412 }
1413
1414 /* Trap handler only disabled I-cache, flush it. */
1415 cheetah_flush_icache();
1416
1417 /* Re-enable I-cache */
1418 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1419 "or %%g1, %1, %%g1\n\t"
1420 "stxa %%g1, [%%g0] %0\n\t"
1421 "membar #Sync"
1422 : /* no outputs */
1423 : "i" (ASI_DCU_CONTROL_REG),
1424 "i" (DCU_IC)
1425 : "g1");
1426
1427 if (flush_all)
1428 cheetah_flush_ecache();
1429 else if (flush_line)
1430 cheetah_flush_ecache_line(afar);
1431 }
1432
1433 /* Re-enable error reporting */
1434 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1435 "or %%g1, %1, %%g1\n\t"
1436 "stxa %%g1, [%%g0] %0\n\t"
1437 "membar #Sync"
1438 : /* no outputs */
1439 : "i" (ASI_ESTATE_ERROR_EN),
1440 "i" (ESTATE_ERROR_CEEN)
1441 : "g1");
1442
1443 /* Decide if we can continue after handling this trap and
1444 * logging the error.
1445 */
1446 recoverable = 1;
1447 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1448 recoverable = 0;
1449
1450 /* Re-check AFSR/AFAR */
1451 (void) cheetah_recheck_errors(&local_snapshot);
1452
1453 /* Log errors. */
1454 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1455
1456 if (!recoverable)
1457 panic("Irrecoverable Correctable-ECC error trap.\n");
1458 }
1459
1460 void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1461 {
1462 struct cheetah_err_info local_snapshot, *p;
1463 int recoverable, is_memory;
1464
1465 #ifdef CONFIG_PCI
1466 /* Check for the special PCI poke sequence. */
1467 if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
1468 cheetah_flush_icache();
1469 cheetah_flush_dcache();
1470
1471 /* Re-enable I-cache/D-cache */
1472 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1473 "or %%g1, %1, %%g1\n\t"
1474 "stxa %%g1, [%%g0] %0\n\t"
1475 "membar #Sync"
1476 : /* no outputs */
1477 : "i" (ASI_DCU_CONTROL_REG),
1478 "i" (DCU_DC | DCU_IC)
1479 : "g1");
1480
1481 /* Re-enable error reporting */
1482 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1483 "or %%g1, %1, %%g1\n\t"
1484 "stxa %%g1, [%%g0] %0\n\t"
1485 "membar #Sync"
1486 : /* no outputs */
1487 : "i" (ASI_ESTATE_ERROR_EN),
1488 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1489 : "g1");
1490
1491 (void) cheetah_recheck_errors(NULL);
1492
1493 pci_poke_faulted = 1;
1494 regs->tpc += 4;
1495 regs->tnpc = regs->tpc + 4;
1496 return;
1497 }
1498 #endif
1499
1500 p = cheetah_get_error_log(afsr);
1501 if (!p) {
1502 prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
1503 afsr, afar);
1504 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1505 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1506 prom_halt();
1507 }
1508
1509 /* Grab snapshot of logged error. */
1510 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1511
1512 /* If the current trap snapshot does not match what the
1513 * trap handler passed along into our args, big trouble.
1514 * In such a case, mark the local copy as invalid.
1515 *
1516 * Else, it matches and we mark the afsr in the non-local
1517 * copy as invalid so we may log new error traps there.
1518 */
1519 if (p->afsr != afsr || p->afar != afar)
1520 local_snapshot.afsr = CHAFSR_INVALID;
1521 else
1522 p->afsr = CHAFSR_INVALID;
1523
1524 is_memory = cheetah_check_main_memory(afar);
1525
1526 {
1527 int flush_all, flush_line;
1528
1529 flush_all = flush_line = 0;
1530 if ((afsr & CHAFSR_EDU) != 0UL) {
1531 if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
1532 flush_line = 1;
1533 else
1534 flush_all = 1;
1535 } else if ((afsr & CHAFSR_BERR) != 0UL) {
1536 if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
1537 flush_line = 1;
1538 else
1539 flush_all = 1;
1540 }
1541
1542 cheetah_flush_icache();
1543 cheetah_flush_dcache();
1544
1545 /* Re-enable I/D caches */
1546 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1547 "or %%g1, %1, %%g1\n\t"
1548 "stxa %%g1, [%%g0] %0\n\t"
1549 "membar #Sync"
1550 : /* no outputs */
1551 : "i" (ASI_DCU_CONTROL_REG),
1552 "i" (DCU_IC | DCU_DC)
1553 : "g1");
1554
1555 if (flush_all)
1556 cheetah_flush_ecache();
1557 else if (flush_line)
1558 cheetah_flush_ecache_line(afar);
1559 }
1560
1561 /* Re-enable error reporting */
1562 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1563 "or %%g1, %1, %%g1\n\t"
1564 "stxa %%g1, [%%g0] %0\n\t"
1565 "membar #Sync"
1566 : /* no outputs */
1567 : "i" (ASI_ESTATE_ERROR_EN),
1568 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1569 : "g1");
1570
1571 /* Decide if we can continue after handling this trap and
1572 * logging the error.
1573 */
1574 recoverable = 1;
1575 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1576 recoverable = 0;
1577
1578 /* Re-check AFSR/AFAR. What we are looking for here is whether a new
1579 * error was logged while we had error reporting traps disabled.
1580 */
1581 if (cheetah_recheck_errors(&local_snapshot)) {
1582 unsigned long new_afsr = local_snapshot.afsr;
1583
1584 /* If we got a new asynchronous error, die... */
1585 if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1586 CHAFSR_WDU | CHAFSR_CPU |
1587 CHAFSR_IVU | CHAFSR_UE |
1588 CHAFSR_BERR | CHAFSR_TO))
1589 recoverable = 0;
1590 }
1591
1592 /* Log errors. */
1593 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1594
1595 /* "Recoverable" here means we try to yank the page from ever
1596 * being newly used again. This depends upon a few things:
1597 * 1) Must be main memory, and AFAR must be valid.
1598 * 2) If we trapped from user, OK.
1599 * 3) Else, if we trapped from kernel we must find exception
1600 * table entry (ie. we have to have been accessing user
1601 * space).
1602 *
1603 * If AFAR is not in main memory, or we trapped from kernel
1604 * and cannot find an exception table entry, it is unacceptable
1605 * to try and continue.
1606 */
1607 if (recoverable && is_memory) {
1608 if ((regs->tstate & TSTATE_PRIV) == 0UL) {
1609 /* OK, usermode access. */
1610 recoverable = 1;
1611 } else {
1612 const struct exception_table_entry *entry;
1613
1614 entry = search_exception_tables(regs->tpc);
1615 if (entry) {
1616 /* OK, kernel access to userspace. */
1617 recoverable = 1;
1618
1619 } else {
1620 /* BAD, privileged state is corrupted. */
1621 recoverable = 0;
1622 }
1623
1624 if (recoverable) {
1625 if (pfn_valid(afar >> PAGE_SHIFT))
1626 get_page(pfn_to_page(afar >> PAGE_SHIFT));
1627 else
1628 recoverable = 0;
1629
1630 /* Only perform fixup if we still have a
1631 * recoverable condition.
1632 */
1633 if (recoverable) {
1634 regs->tpc = entry->fixup;
1635 regs->tnpc = regs->tpc + 4;
1636 }
1637 }
1638 }
1639 } else {
1640 recoverable = 0;
1641 }
1642
1643 if (!recoverable)
1644 panic("Irrecoverable deferred error trap.\n");
1645 }
1646
1647 /* Handle a D/I cache parity error trap. TYPE is encoded as:
1648 *
1649 * Bit0: 0=dcache,1=icache
1650 * Bit1: 0=recoverable,1=unrecoverable
1651 *
1652 * The hardware has disabled both the I-cache and D-cache in
1653 * the %dcr register.
1654 */
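/* A sketch of that TYPE encoding as macros (names illustrative, not
 * from the original source):
 */
#if 0
#define EXAMPLE_PARITY_ICACHE		0x1	/* clear: D-cache */
#define EXAMPLE_PARITY_UNRECOVERABLE	0x2	/* clear: recoverable */
#endif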
1655 void cheetah_plus_parity_error(int type, struct pt_regs *regs)
1656 {
1657 if (type & 0x1)
1658 __cheetah_flush_icache();
1659 else
1660 cheetah_plus_zap_dcache_parity();
1661 cheetah_flush_dcache();
1662
1663 /* Re-enable I-cache/D-cache */
1664 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1665 "or %%g1, %1, %%g1\n\t"
1666 "stxa %%g1, [%%g0] %0\n\t"
1667 "membar #Sync"
1668 : /* no outputs */
1669 : "i" (ASI_DCU_CONTROL_REG),
1670 "i" (DCU_DC | DCU_IC)
1671 : "g1");
1672
1673 if (type & 0x2) {
1674 printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1675 smp_processor_id(),
1676 (type & 0x1) ? 'I' : 'D',
1677 regs->tpc);
1678 panic("Irrecoverable Cheetah+ parity error.");
1679 }
1680
1681 printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1682 smp_processor_id(),
1683 (type & 0x1) ? 'I' : 'D',
1684 regs->tpc);
1685 }
1686
1687 void do_fpe_common(struct pt_regs *regs)
1688 {
1689 if (regs->tstate & TSTATE_PRIV) {
1690 regs->tpc = regs->tnpc;
1691 regs->tnpc += 4;
1692 } else {
1693 unsigned long fsr = current_thread_info()->xfsr[0];
1694 siginfo_t info;
1695
1696 if (test_thread_flag(TIF_32BIT)) {
1697 regs->tpc &= 0xffffffff;
1698 regs->tnpc &= 0xffffffff;
1699 }
1700 info.si_signo = SIGFPE;
1701 info.si_errno = 0;
1702 info.si_addr = (void __user *)regs->tpc;
1703 info.si_trapno = 0;
1704 info.si_code = __SI_FAULT;
1705 if ((fsr & 0x1c000) == (1 << 14)) {
1706 if (fsr & 0x10)
1707 info.si_code = FPE_FLTINV;
1708 else if (fsr & 0x08)
1709 info.si_code = FPE_FLTOVF;
1710 else if (fsr & 0x04)
1711 info.si_code = FPE_FLTUND;
1712 else if (fsr & 0x02)
1713 info.si_code = FPE_FLTDIV;
1714 else if (fsr & 0x01)
1715 info.si_code = FPE_FLTRES;
1716 }
1717 force_sig_info(SIGFPE, &info, current);
1718 }
1719 }
1720
1721 void do_fpieee(struct pt_regs *regs)
1722 {
1723 if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
1724 0, 0x24, SIGFPE) == NOTIFY_STOP)
1725 return;
1726
1727 do_fpe_common(regs);
1728 }
1729
1730 extern int do_mathemu(struct pt_regs *, struct fpustate *);
1731
1732 void do_fpother(struct pt_regs *regs)
1733 {
1734 struct fpustate *f = FPUSTATE;
1735 int ret = 0;
1736
1737 if (notify_die(DIE_TRAP, "fpu exception other", regs,
1738 0, 0x25, SIGFPE) == NOTIFY_STOP)
1739 return;
1740
1741 switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
1742 case (2 << 14): /* unfinished_FPop */
1743 case (3 << 14): /* unimplemented_FPop */
1744 ret = do_mathemu(regs, f);
1745 break;
1746 }
1747 if (ret)
1748 return;
1749 do_fpe_common(regs);
1750 }
1751
1752 void do_tof(struct pt_regs *regs)
1753 {
1754 siginfo_t info;
1755
1756 if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
1757 0, 0x26, SIGEMT) == NOTIFY_STOP)
1758 return;
1759
1760 if (regs->tstate & TSTATE_PRIV)
1761 die_if_kernel("Penguin overflow trap from kernel mode", regs);
1762 if (test_thread_flag(TIF_32BIT)) {
1763 regs->tpc &= 0xffffffff;
1764 regs->tnpc &= 0xffffffff;
1765 }
1766 info.si_signo = SIGEMT;
1767 info.si_errno = 0;
1768 info.si_code = EMT_TAGOVF;
1769 info.si_addr = (void __user *)regs->tpc;
1770 info.si_trapno = 0;
1771 force_sig_info(SIGEMT, &info, current);
1772 }
1773
1774 void do_div0(struct pt_regs *regs)
1775 {
1776 siginfo_t info;
1777
1778 if (notify_die(DIE_TRAP, "integer division by zero", regs,
1779 0, 0x28, SIGFPE) == NOTIFY_STOP)
1780 return;
1781
1782 if (regs->tstate & TSTATE_PRIV)
1783 die_if_kernel("TL0: Kernel divide by zero.", regs);
1784 if (test_thread_flag(TIF_32BIT)) {
1785 regs->tpc &= 0xffffffff;
1786 regs->tnpc &= 0xffffffff;
1787 }
1788 info.si_signo = SIGFPE;
1789 info.si_errno = 0;
1790 info.si_code = FPE_INTDIV;
1791 info.si_addr = (void __user *)regs->tpc;
1792 info.si_trapno = 0;
1793 force_sig_info(SIGFPE, &info, current);
1794 }
1795
1796 void instruction_dump (unsigned int *pc)
1797 {
1798 int i;
1799
1800 if ((((unsigned long) pc) & 3))
1801 return;
1802
1803 printk("Instruction DUMP:");
1804 for (i = -3; i < 6; i++)
1805 printk("%c%08x%c", i ? ' ' : '<', pc[i], i ? ' ' : '>');
1806 printk("\n");
1807 }
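
/* Illustrative output (opcodes invented for the example):
 *
 *   Instruction DUMP:  40001234  01000000  9de3bf40 <91d02010> 01000000 ...
 *
 * Three words of context precede the faulting instruction, which is
 * bracketed, and five follow it.
 */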
1808
1809 static void user_instruction_dump (unsigned int __user *pc)
1810 {
1811 int i;
1812 unsigned int buf[9];
1813
1814 if ((((unsigned long) pc) & 3))
1815 return;
1816
1817 if (copy_from_user(buf, pc - 3, sizeof(buf)))
1818 return;
1819
1820 printk("Instruction DUMP:");
1821 for (i = 0; i < 9; i++)
1822 printk("%c%08x%c", i == 3 ? '<' : ' ', buf[i], i == 3 ? '>' : ' ');
1823 printk("\n");
1824 }
1825
1826 void show_stack(struct task_struct *tsk, unsigned long *_ksp)
1827 {
1828 unsigned long pc, fp, thread_base, ksp;
1829 struct thread_info *tp = tsk->thread_info;
1830 struct reg_window *rw;
1831 int count = 0;
1832
1833 ksp = (unsigned long) _ksp;
1834
1835 if (tp == current_thread_info())
1836 flushw_all();
1837
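/* The sparc64 ABI keeps %sp and %fp biased by STACK_BIAS (2047),
 * so the bias must be added back to recover the true address of
 * each frame's register window save area.
 */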
1838 fp = ksp + STACK_BIAS;
1839 thread_base = (unsigned long) tp;
1840
1841 printk("Call Trace:");
1842 #ifdef CONFIG_KALLSYMS
1843 printk("\n");
1844 #endif
1845 do {
1846 /* Bogus frame pointer? */
1847 if (fp < (thread_base + sizeof(struct thread_info)) ||
1848 fp >= (thread_base + THREAD_SIZE))
1849 break;
1850 rw = (struct reg_window *)fp;
1851 pc = rw->ins[7];
1852 printk(" [%016lx] ", pc);
1853 print_symbol("%s\n", pc);
1854 fp = rw->ins[6] + STACK_BIAS;
1855 } while (++count < 16);
1856 #ifndef CONFIG_KALLSYMS
1857 printk("\n");
1858 #endif
1859 }
1860
1861 void dump_stack(void)
1862 {
1863 unsigned long *ksp;
1864
1865 __asm__ __volatile__("mov %%fp, %0"
1866 : "=r" (ksp));
1867 show_stack(current, ksp);
1868 }
1869
1870 EXPORT_SYMBOL(dump_stack);
1871
1872 static inline int is_kernel_stack(struct task_struct *task,
1873 struct reg_window *rw)
1874 {
1875 unsigned long rw_addr = (unsigned long) rw;
1876 unsigned long thread_base, thread_end;
1877
1878 if (rw_addr < PAGE_OFFSET) {
1879 if (task != &init_task)
1880 return 0;
1881 }
1882
1883 thread_base = (unsigned long) task->thread_info;
1884 thread_end = thread_base + sizeof(union thread_union);
1885 if (rw_addr >= thread_base &&
1886 rw_addr < thread_end &&
1887 !(rw_addr & 0x7UL))
1888 return 1;
1889
1890 return 0;
1891 }
1892
1893 static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
1894 {
1895 unsigned long fp = rw->ins[6];
1896
1897 if (!fp)
1898 return NULL;
1899
1900 return (struct reg_window *) (fp + STACK_BIAS);
1901 }
1902
1903 void die_if_kernel(char *str, struct pt_regs *regs)
1904 {
1905 static int die_counter;
1906 extern void __show_regs(struct pt_regs * regs);
1907 extern void smp_report_regs(void);
1908 int count = 0;
1909
1910 /* Amuse the user. */
1911 printk(
1912 " \\|/ ____ \\|/\n"
1913 " \"@'/ .. \\`@\"\n"
1914 " /_| \\__/ |_\\\n"
1915 " \\__U_/\n");
1916
1917 printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter);
1918 notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
1919 __asm__ __volatile__("flushw");
1920 __show_regs(regs);
1921 if (regs->tstate & TSTATE_PRIV) {
1922 struct reg_window *rw = (struct reg_window *)
1923 (regs->u_regs[UREG_FP] + STACK_BIAS);
1924
1925 /* Stop the back trace when we hit userland or we
1926 * find some badly aligned kernel stack.
1927 */
1928 while (rw &&
1929 count++ < 30 &&
1930 is_kernel_stack(current, rw)) {
1931 printk("Caller[%016lx]", rw->ins[7]);
1932 print_symbol(": %s", rw->ins[7]);
1933 printk("\n");
1934
1935 rw = kernel_stack_up(rw);
1936 }
1937 instruction_dump ((unsigned int *) regs->tpc);
1938 } else {
1939 if (test_thread_flag(TIF_32BIT)) {
1940 regs->tpc &= 0xffffffff;
1941 regs->tnpc &= 0xffffffff;
1942 }
1943 user_instruction_dump ((unsigned int __user *) regs->tpc);
1944 }
1945 #ifdef CONFIG_SMP
1946 smp_report_regs();
1947 #endif
1948
1949 if (regs->tstate & TSTATE_PRIV)
1950 do_exit(SIGKILL);
1951 do_exit(SIGSEGV);
1952 }
1953
1954 extern int handle_popc(u32 insn, struct pt_regs *regs);
1955 extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
1956
1957 void do_illegal_instruction(struct pt_regs *regs)
1958 {
1959 unsigned long pc = regs->tpc;
1960 unsigned long tstate = regs->tstate;
1961 u32 insn;
1962 siginfo_t info;
1963
1964 if (notify_die(DIE_TRAP, "illegal instruction", regs,
1965 0, 0x10, SIGILL) == NOTIFY_STOP)
1966 return;
1967
1968 if (tstate & TSTATE_PRIV)
1969 die_if_kernel("Kernel illegal instruction", regs);
1970 if (test_thread_flag(TIF_32BIT))
1971 pc = (u32)pc;
1972 if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
1973 if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
1974 if (handle_popc(insn, regs))
1975 return;
1976 } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
1977 if (handle_ldf_stq(insn, regs))
1978 return;
1979 }
1980 }
1981 info.si_signo = SIGILL;
1982 info.si_errno = 0;
1983 info.si_code = ILL_ILLOPC;
1984 info.si_addr = (void __user *)pc;
1985 info.si_trapno = 0;
1986 force_sig_info(SIGILL, &info, current);
1987 }
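
/* For illustration: each opcode check above is a mask/match test on
 * the 32-bit instruction word -- mask off the don't-care fields and
 * compare what remains against the fixed encoding.  A hypothetical
 * helper spelling the pattern out:
 */
#if 0
static inline int insn_matches(u32 insn, u32 mask, u32 match)
{
	/* e.g. insn_matches(insn, 0xc1ffc000, 0x81700000) for POPC */
	return (insn & mask) == match;
}
#endif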
1988
1989 void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
1990 {
1991 siginfo_t info;
1992
1993 if (notify_die(DIE_TRAP, "memory address unaligned", regs,
1994 0, 0x34, SIGSEGV) == NOTIFY_STOP)
1995 return;
1996
1997 if (regs->tstate & TSTATE_PRIV) {
1998 extern void kernel_unaligned_trap(struct pt_regs *regs,
1999 unsigned int insn,
2000 unsigned long sfar,
2001 unsigned long sfsr);
2002
2003 kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc),
2004 sfar, sfsr);
2005 return;
2006 }
2007 info.si_signo = SIGBUS;
2008 info.si_errno = 0;
2009 info.si_code = BUS_ADRALN;
2010 info.si_addr = (void __user *)sfar;
2011 info.si_trapno = 0;
2012 force_sig_info(SIGBUS, &info, current);
2013 }
2014
2015 void do_privop(struct pt_regs *regs)
2016 {
2017 siginfo_t info;
2018
2019 if (notify_die(DIE_TRAP, "privileged operation", regs,
2020 0, 0x11, SIGILL) == NOTIFY_STOP)
2021 return;
2022
2023 if (test_thread_flag(TIF_32BIT)) {
2024 regs->tpc &= 0xffffffff;
2025 regs->tnpc &= 0xffffffff;
2026 }
2027 info.si_signo = SIGILL;
2028 info.si_errno = 0;
2029 info.si_code = ILL_PRVOPC;
2030 info.si_addr = (void __user *)regs->tpc;
2031 info.si_trapno = 0;
2032 force_sig_info(SIGILL, &info, current);
2033 }
2034
2035 void do_privact(struct pt_regs *regs)
2036 {
2037 do_privop(regs);
2038 }
2039
2040 /* Trap level 1 stuff or other traps we should never see... */
2041 void do_cee(struct pt_regs *regs)
2042 {
2043 die_if_kernel("TL0: Cache Error Exception", regs);
2044 }
2045
2046 void do_cee_tl1(struct pt_regs *regs)
2047 {
2048 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2049 die_if_kernel("TL1: Cache Error Exception", regs);
2050 }
2051
2052 void do_dae_tl1(struct pt_regs *regs)
2053 {
2054 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2055 die_if_kernel("TL1: Data Access Exception", regs);
2056 }
2057
2058 void do_iae_tl1(struct pt_regs *regs)
2059 {
2060 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2061 die_if_kernel("TL1: Instruction Access Exception", regs);
2062 }
2063
2064 void do_div0_tl1(struct pt_regs *regs)
2065 {
2066 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2067 die_if_kernel("TL1: DIV0 Exception", regs);
2068 }
2069
2070 void do_fpdis_tl1(struct pt_regs *regs)
2071 {
2072 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2073 die_if_kernel("TL1: FPU Disabled", regs);
2074 }
2075
2076 void do_fpieee_tl1(struct pt_regs *regs)
2077 {
2078 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2079 die_if_kernel("TL1: FPU IEEE Exception", regs);
2080 }
2081
2082 void do_fpother_tl1(struct pt_regs *regs)
2083 {
2084 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2085 die_if_kernel("TL1: FPU Other Exception", regs);
2086 }
2087
2088 void do_ill_tl1(struct pt_regs *regs)
2089 {
2090 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2091 die_if_kernel("TL1: Illegal Instruction Exception", regs);
2092 }
2093
2094 void do_irq_tl1(struct pt_regs *regs)
2095 {
2096 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2097 die_if_kernel("TL1: IRQ Exception", regs);
2098 }
2099
2100 void do_lddfmna_tl1(struct pt_regs *regs)
2101 {
2102 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2103 die_if_kernel("TL1: LDDF Exception", regs);
2104 }
2105
2106 void do_stdfmna_tl1(struct pt_regs *regs)
2107 {
2108 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2109 die_if_kernel("TL1: STDF Exception", regs);
2110 }
2111
2112 void do_paw(struct pt_regs *regs)
2113 {
2114 die_if_kernel("TL0: Phys Watchpoint Exception", regs);
2115 }
2116
2117 void do_paw_tl1(struct pt_regs *regs)
2118 {
2119 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2120 die_if_kernel("TL1: Phys Watchpoint Exception", regs);
2121 }
2122
2123 void do_vaw(struct pt_regs *regs)
2124 {
2125 die_if_kernel("TL0: Virt Watchpoint Exception", regs);
2126 }
2127
2128 void do_vaw_tl1(struct pt_regs *regs)
2129 {
2130 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2131 die_if_kernel("TL1: Virt Watchpoint Exception", regs);
2132 }
2133
2134 void do_tof_tl1(struct pt_regs *regs)
2135 {
2136 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2137 die_if_kernel("TL1: Tag Overflow Exception", regs);
2138 }
2139
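/* Service the v8-compatibility "read %psr" software trap: synthesize
 * a 32-bit %psr image from the 64-bit %tstate, return it in %i0, and
 * step past the trap instruction.
 */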
2140 void do_getpsr(struct pt_regs *regs)
2141 {
2142 regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
2143 regs->tpc = regs->tnpc;
2144 regs->tnpc += 4;
2145 if (test_thread_flag(TIF_32BIT)) {
2146 regs->tpc &= 0xffffffff;
2147 regs->tnpc &= 0xffffffff;
2148 }
2149 }
2150
2151 extern void thread_info_offsets_are_bolixed_dave(void);
2152
2153 /* Only invoked on boot processor. */
2154 void __init trap_init(void)
2155 {
2156 /* Compile time sanity check. */
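/* (thread_info_offsets_are_bolixed_dave() is declared but never
 * defined: if every comparison below is false at compile time the
 * call is optimized away, otherwise the kernel fails to link --
 * the same idea BUILD_BUG_ON() expresses.)
 */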
2157 if (TI_TASK != offsetof(struct thread_info, task) ||
2158 TI_FLAGS != offsetof(struct thread_info, flags) ||
2159 TI_CPU != offsetof(struct thread_info, cpu) ||
2160 TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
2161 TI_KSP != offsetof(struct thread_info, ksp) ||
2162 TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
2163 TI_KREGS != offsetof(struct thread_info, kregs) ||
2164 TI_UTRAPS != offsetof(struct thread_info, utraps) ||
2165 TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
2166 TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
2167 TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
2168 TI_GSR != offsetof(struct thread_info, gsr) ||
2169 TI_XFSR != offsetof(struct thread_info, xfsr) ||
2170 TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
2171 TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
2172 TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
2173 TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
2174 TI_PCR != offsetof(struct thread_info, pcr_reg) ||
2175 TI_CEE_STUFF != offsetof(struct thread_info, cee_stuff) ||
2176 TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
2177 TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
2178 TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
2179 TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
2180 TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
2181 TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
2182 TI_FPREGS != offsetof(struct thread_info, fpregs) ||
2183 (TI_FPREGS & (64 - 1)))
2184 thread_info_offsets_are_bolixed_dave();
2185
2186 /* Attach to the address space of init_task. On SMP we
2187 * do this in smp.c:smp_callin for other cpus.
2188 */
2189 atomic_inc(&init_mm.mm_count);
2190 current->active_mm = &init_mm;
2191 }