/* $Id: traps.c,v 1.85 2002/02/09 19:49:31 davem Exp $
 * arch/sparc64/kernel/traps.c
 *
 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
 */

/*
 * I like traps on v9, :))))
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>  /* for jiffies */
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/mm.h>
#include <linux/init.h>

#include <asm/delay.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/oplib.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/uaccess.h>
#include <asm/fpumacro.h>
#include <asm/lsu.h>
#include <asm/dcu.h>
#include <asm/estate.h>
#include <asm/chafsr.h>
#include <asm/sfafsr.h>
#include <asm/psrcompat.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/kdebug.h>
#include <asm/head.h>
#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif

struct notifier_block *sparc64die_chain;
static DEFINE_SPINLOCK(die_notifier_lock);

int register_die_notifier(struct notifier_block *nb)
{
        int err = 0;
        unsigned long flags;
        spin_lock_irqsave(&die_notifier_lock, flags);
        err = notifier_chain_register(&sparc64die_chain, nb);
        spin_unlock_irqrestore(&die_notifier_lock, flags);
        return err;
}

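/* A sketch (hypothetical client, not part of this file) of how a
 * debugger or monitoring module would hook the die chain registered
 * above:
 *
 *	static int my_die_handler(struct notifier_block *self,
 *				  unsigned long val, void *data)
 *	{
 *		return NOTIFY_DONE;	(decline to consume the event)
 *	}
 *	static struct notifier_block my_die_nb = {
 *		.notifier_call = my_die_handler,
 *	};
 *	register_die_notifier(&my_die_nb);
 *
 * A handler that fully consumes an event returns NOTIFY_STOP, which
 * is why the notify_die() call sites below bail out early when they
 * see that value.
 */
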
/* When an irrecoverable trap occurs at tl > 0, the trap entry
 * code logs the trap state registers at every level in the trap
 * stack.  It is found at (pt_regs + sizeof(pt_regs)) and the layout
 * is as follows:
 */
struct tl1_traplog {
        struct {
                unsigned long tstate;
                unsigned long tpc;
                unsigned long tnpc;
                unsigned long tt;
        } trapstack[4];
        unsigned long tl;
};

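/* Sketch of how the tl1 handlers below locate the log: it lives
 * immediately past the saved registers, hence the (regs + 1) idiom
 * used throughout this file:
 *
 *	struct tl1_traplog *p = (struct tl1_traplog *)(regs + 1);
 *	dump_tl1_traplog(p);
 */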
static void dump_tl1_traplog(struct tl1_traplog *p)
{
        int i, limit;

        printk("TRAPLOG: Error at trap level 0x%lx, dumping trap stack.\n",
               p->tl);

        limit = (tlb_type == hypervisor) ? 2 : 4;
        for (i = 0; i < limit; i++) {
                printk(KERN_CRIT
                       "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
                       "TNPC[%016lx] TT[%lx]\n",
                       i + 1,
                       p->trapstack[i].tstate, p->trapstack[i].tpc,
                       p->trapstack[i].tnpc, p->trapstack[i].tt);
        }
}

void do_call_debug(struct pt_regs *regs)
{
        notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT);
}

void bad_trap(struct pt_regs *regs, long lvl)
{
        char buffer[32];
        siginfo_t info;

        if (notify_die(DIE_TRAP, "bad trap", regs,
                       0, lvl, SIGTRAP) == NOTIFY_STOP)
                return;

        if (lvl < 0x100) {
                sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
                die_if_kernel(buffer, regs);
        }

        lvl -= 0x100;
        if (regs->tstate & TSTATE_PRIV) {
                sprintf(buffer, "Kernel bad sw trap %lx", lvl);
                die_if_kernel(buffer, regs);
        }
        if (test_thread_flag(TIF_32BIT)) {
                regs->tpc &= 0xffffffff;
                regs->tnpc &= 0xffffffff;
        }
        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code = ILL_ILLTRP;
        info.si_addr = (void __user *)regs->tpc;
        info.si_trapno = lvl;
        force_sig_info(SIGILL, &info, current);
}

void bad_trap_tl1(struct pt_regs *regs, long lvl)
{
        char buffer[32];

        if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
                       0, lvl, SIGTRAP) == NOTIFY_STOP)
                return;

        dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

        sprintf(buffer, "Bad trap %lx at tl>0", lvl);
        die_if_kernel(buffer, regs);
}

#ifdef CONFIG_DEBUG_BUGVERBOSE
void do_BUG(const char *file, int line)
{
        bust_spinlocks(1);
        printk("kernel BUG at %s:%d!\n", file, line);
}
#endif

void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
        siginfo_t info;

        if (notify_die(DIE_TRAP, "instruction access exception", regs,
                       0, 0x8, SIGTRAP) == NOTIFY_STOP)
                return;

        if (regs->tstate & TSTATE_PRIV) {
                printk("spitfire_insn_access_exception: SFSR[%016lx] "
                       "SFAR[%016lx], going.\n", sfsr, sfar);
                die_if_kernel("Iax", regs);
        }
        if (test_thread_flag(TIF_32BIT)) {
                regs->tpc &= 0xffffffff;
                regs->tnpc &= 0xffffffff;
        }
        info.si_signo = SIGSEGV;
        info.si_errno = 0;
        info.si_code = SEGV_MAPERR;
        info.si_addr = (void __user *)regs->tpc;
        info.si_trapno = 0;
        force_sig_info(SIGSEGV, &info, current);
}

void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
        if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
                       0, 0x8, SIGTRAP) == NOTIFY_STOP)
                return;

        dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
        spitfire_insn_access_exception(regs, sfsr, sfar);
}

void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
        unsigned short type = (type_ctx >> 16);
        unsigned short ctx = (type_ctx & 0xffff);
        siginfo_t info;

        if (notify_die(DIE_TRAP, "instruction access exception", regs,
                       0, 0x8, SIGTRAP) == NOTIFY_STOP)
                return;

        if (regs->tstate & TSTATE_PRIV) {
                printk("sun4v_insn_access_exception: ADDR[%016lx] "
                       "CTX[%04x] TYPE[%04x], going.\n",
                       addr, ctx, type);
                die_if_kernel("Iax", regs);
        }

        if (test_thread_flag(TIF_32BIT)) {
                regs->tpc &= 0xffffffff;
                regs->tnpc &= 0xffffffff;
        }
        info.si_signo = SIGSEGV;
        info.si_errno = 0;
        info.si_code = SEGV_MAPERR;
        info.si_addr = (void __user *) addr;
        info.si_trapno = 0;
        force_sig_info(SIGSEGV, &info, current);
}

void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
        if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
                       0, 0x8, SIGTRAP) == NOTIFY_STOP)
                return;

        dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
        sun4v_insn_access_exception(regs, addr, type_ctx);
}

void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
        siginfo_t info;

        if (notify_die(DIE_TRAP, "data access exception", regs,
                       0, 0x30, SIGTRAP) == NOTIFY_STOP)
                return;

        if (regs->tstate & TSTATE_PRIV) {
                /* Test if this comes from uaccess places. */
                const struct exception_table_entry *entry;

                entry = search_exception_tables(regs->tpc);
                if (entry) {
                        /* Ouch, somebody is trying VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
                        printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
                        printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
                               regs->tpc, entry->fixup);
#endif
                        regs->tpc = entry->fixup;
                        regs->tnpc = regs->tpc + 4;
                        return;
                }
                /* Shit... */
                printk("spitfire_data_access_exception: SFSR[%016lx] "
                       "SFAR[%016lx], going.\n", sfsr, sfar);
                die_if_kernel("Dax", regs);
        }

        info.si_signo = SIGSEGV;
        info.si_errno = 0;
        info.si_code = SEGV_MAPERR;
        info.si_addr = (void __user *)sfar;
        info.si_trapno = 0;
        force_sig_info(SIGSEGV, &info, current);
}

void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
        if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
                       0, 0x30, SIGTRAP) == NOTIFY_STOP)
                return;

        dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
        spitfire_data_access_exception(regs, sfsr, sfar);
}

void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
        unsigned short type = (type_ctx >> 16);
        unsigned short ctx = (type_ctx & 0xffff);
        siginfo_t info;

        if (notify_die(DIE_TRAP, "data access exception", regs,
                       0, 0x8, SIGTRAP) == NOTIFY_STOP)
                return;

        if (regs->tstate & TSTATE_PRIV) {
                printk("sun4v_data_access_exception: ADDR[%016lx] "
                       "CTX[%04x] TYPE[%04x], going.\n",
                       addr, ctx, type);
                die_if_kernel("Dax", regs);
        }

        if (test_thread_flag(TIF_32BIT)) {
                regs->tpc &= 0xffffffff;
                regs->tnpc &= 0xffffffff;
        }
        info.si_signo = SIGSEGV;
        info.si_errno = 0;
        info.si_code = SEGV_MAPERR;
        info.si_addr = (void __user *) addr;
        info.si_trapno = 0;
        force_sig_info(SIGSEGV, &info, current);
}

void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
        if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
                       0, 0x8, SIGTRAP) == NOTIFY_STOP)
                return;

        dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
        sun4v_data_access_exception(regs, addr, type_ctx);
}

#ifdef CONFIG_PCI
/* This is really pathetic... */
extern volatile int pci_poke_in_progress;
extern volatile int pci_poke_cpu;
extern volatile int pci_poke_faulted;
#endif

/* When access exceptions happen, we must do this. */
static void spitfire_clean_and_reenable_l1_caches(void)
{
        unsigned long va;

        if (tlb_type != spitfire)
                BUG();

        /* Clean 'em. */
        for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
                spitfire_put_icache_tag(va, 0x0);
                spitfire_put_dcache_tag(va, 0x0);
        }

        /* Re-enable in LSU. */
        __asm__ __volatile__("flush %%g6\n\t"
                             "membar #Sync\n\t"
                             "stxa %0, [%%g0] %1\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
                                    LSU_CONTROL_IM | LSU_CONTROL_DM),
                               "i" (ASI_LSU_CONTROL)
                             : "memory");
}

static void spitfire_enable_estate_errors(void)
{
        __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "r" (ESTATE_ERR_ALL),
                               "i" (ASI_ESTATE_ERROR_EN));
}

static char ecc_syndrome_table[] = {
        0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
        0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
        0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
        0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
        0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
        0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
        0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
        0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
        0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
        0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
        0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
        0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
        0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
        0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
        0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
        0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
        0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
        0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
        0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
        0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
        0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
        0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
        0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
        0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
        0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
        0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
        0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
        0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
        0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
        0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
        0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
        0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};

static char *syndrome_unknown = "<Unknown>";

static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
{
        unsigned short scode;
        char memmod_str[64], *p;

        if (udbl & bit) {
                scode = ecc_syndrome_table[udbl & 0xff];
                if (prom_getunumber(scode, afar,
                                    memmod_str, sizeof(memmod_str)) == -1)
                        p = syndrome_unknown;
                else
                        p = memmod_str;
                printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
                       "Memory Module \"%s\"\n",
                       smp_processor_id(), scode, p);
        }

        if (udbh & bit) {
                scode = ecc_syndrome_table[udbh & 0xff];
                if (prom_getunumber(scode, afar,
                                    memmod_str, sizeof(memmod_str)) == -1)
                        p = syndrome_unknown;
                else
                        p = memmod_str;
                printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
                       "Memory Module \"%s\"\n",
                       smp_processor_id(), scode, p);
        }
}

static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
{
        printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
               "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
               smp_processor_id(), afsr, afar, udbl, udbh, tl1);

        spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);

        /* We always log it, even if someone is listening for this
         * trap.
         */
        notify_die(DIE_TRAP, "Correctable ECC Error", regs,
                   0, TRAP_TYPE_CEE, SIGTRAP);

        /* The Correctable ECC Error trap does not disable I/D caches.  So
         * we only have to restore the ESTATE Error Enable register.
         */
        spitfire_enable_estate_errors();
}

static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
{
        siginfo_t info;

        printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
               "AFAR[%lx] UDBL[%lx] UDBH[%lx] TT[%lx] TL>1[%d]\n",
               smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);

        /* XXX add more human friendly logging of the error status
         * XXX as is implemented for cheetah
         */

        spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);

        /* We always log it, even if someone is listening for this
         * trap.
         */
        notify_die(DIE_TRAP, "Uncorrectable Error", regs,
                   0, tt, SIGTRAP);

        if (regs->tstate & TSTATE_PRIV) {
                if (tl1)
                        dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
                die_if_kernel("UE", regs);
        }

        /* XXX need more intelligent processing here, such as is implemented
         * XXX for cheetah errors, in fact if the E-cache still holds the
         * XXX line with bad parity this will loop
         */

        spitfire_clean_and_reenable_l1_caches();
        spitfire_enable_estate_errors();

        if (test_thread_flag(TIF_32BIT)) {
                regs->tpc &= 0xffffffff;
                regs->tnpc &= 0xffffffff;
        }
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_OBJERR;
        info.si_addr = (void *)0;
        info.si_trapno = 0;
        force_sig_info(SIGBUS, &info, current);
}

void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
{
        unsigned long afsr, tt, udbh, udbl;
        int tl1;

        afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
        tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
        tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
        udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
        udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;

#ifdef CONFIG_PCI
        if (tt == TRAP_TYPE_DAE &&
            pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
                spitfire_clean_and_reenable_l1_caches();
                spitfire_enable_estate_errors();

                pci_poke_faulted = 1;
                regs->tnpc = regs->tpc + 4;
                return;
        }
#endif

        if (afsr & SFAFSR_UE)
                spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);

        if (tt == TRAP_TYPE_CEE) {
                /* Handle the case where we took a CEE trap, but ACK'd
                 * only the UE state in the UDB error registers.
                 */
                if (afsr & SFAFSR_UE) {
                        if (udbh & UDBE_CE) {
                                __asm__ __volatile__(
                                        "stxa %0, [%1] %2\n\t"
                                        "membar #Sync"
                                        : /* no outputs */
                                        : "r" (udbh & UDBE_CE),
                                          "r" (0x0), "i" (ASI_UDB_ERROR_W));
                        }
                        if (udbl & UDBE_CE) {
                                __asm__ __volatile__(
                                        "stxa %0, [%1] %2\n\t"
                                        "membar #Sync"
                                        : /* no outputs */
                                        : "r" (udbl & UDBE_CE),
                                          "r" (0x18), "i" (ASI_UDB_ERROR_W));
                        }
                }

                spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
        }
}

int cheetah_pcache_forced_on;

void cheetah_enable_pcache(void)
{
        unsigned long dcr;

        printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
               smp_processor_id());

        __asm__ __volatile__("ldxa [%%g0] %1, %0"
                             : "=r" (dcr)
                             : "i" (ASI_DCU_CONTROL_REG));
        dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
        __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
}

/* Cheetah error trap handling. */
static unsigned long ecache_flush_physbase;
static unsigned long ecache_flush_linesize;
static unsigned long ecache_flush_size;

/* WARNING: The error trap handlers in assembly know the precise
 * layout of the following structure.
 *
 * C-level handlers below use this information to log the error
 * and then determine how to recover (if possible).
 */
struct cheetah_err_info {
/*0x00*/u64 afsr;
/*0x08*/u64 afar;

        /* D-cache state */
/*0x10*/u64 dcache_data[4];     /* The actual data */
/*0x30*/u64 dcache_index;       /* D-cache index */
/*0x38*/u64 dcache_tag;         /* D-cache tag/valid */
/*0x40*/u64 dcache_utag;        /* D-cache microtag */
/*0x48*/u64 dcache_stag;        /* D-cache snooptag */

        /* I-cache state */
/*0x50*/u64 icache_data[8];     /* The actual insns + predecode */
/*0x90*/u64 icache_index;       /* I-cache index */
/*0x98*/u64 icache_tag;         /* I-cache phys tag */
/*0xa0*/u64 icache_utag;        /* I-cache microtag */
/*0xa8*/u64 icache_stag;        /* I-cache snooptag */
/*0xb0*/u64 icache_upper;       /* I-cache upper-tag */
/*0xb8*/u64 icache_lower;       /* I-cache lower-tag */

        /* E-cache state */
/*0xc0*/u64 ecache_data[4];     /* 32 bytes from staging registers */
/*0xe0*/u64 ecache_index;       /* E-cache index */
/*0xe8*/u64 ecache_tag;         /* E-cache tag/state */

/*0xf0*/u64 __pad[32 - 30];
};
#define CHAFSR_INVALID          ((u64)-1L)

/* This table is ordered in priority of errors and matches the
 * AFAR overwrite policy as well.
 */
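/* Concretely, cheetah_get_hipri() below walks a table front to back
 * and returns the first matching AFSR bit, so earlier entries shadow
 * later ones when several error bits are set at once.
 */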

struct afsr_error_table {
        unsigned long mask;
        const char *name;
};

static const char CHAFSR_PERR_msg[] =
        "System interface protocol error";
static const char CHAFSR_IERR_msg[] =
        "Internal processor error";
static const char CHAFSR_ISAP_msg[] =
        "System request parity error on incoming address";
static const char CHAFSR_UCU_msg[] =
        "Uncorrectable E-cache ECC error for ifetch/data";
static const char CHAFSR_UCC_msg[] =
        "SW Correctable E-cache ECC error for ifetch/data";
static const char CHAFSR_UE_msg[] =
        "Uncorrectable system bus data ECC error for read";
static const char CHAFSR_EDU_msg[] =
        "Uncorrectable E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMU_msg[] =
        "Uncorrectable system bus MTAG error";
static const char CHAFSR_WDU_msg[] =
        "Uncorrectable E-cache ECC error for writeback";
static const char CHAFSR_CPU_msg[] =
        "Uncorrectable ECC error for copyout";
static const char CHAFSR_CE_msg[] =
        "HW corrected system bus data ECC error for read";
static const char CHAFSR_EDC_msg[] =
        "HW corrected E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMC_msg[] =
        "HW corrected system bus MTAG ECC error";
static const char CHAFSR_WDC_msg[] =
        "HW corrected E-cache ECC error for writeback";
static const char CHAFSR_CPC_msg[] =
        "HW corrected ECC error for copyout";
static const char CHAFSR_TO_msg[] =
        "Unmapped error from system bus";
static const char CHAFSR_BERR_msg[] =
        "Bus error response from system bus";
static const char CHAFSR_IVC_msg[] =
        "HW corrected system bus data ECC error for ivec read";
static const char CHAFSR_IVU_msg[] =
        "Uncorrectable system bus data ECC error for ivec read";
static struct afsr_error_table __cheetah_error_table[] = {
        { CHAFSR_PERR, CHAFSR_PERR_msg },
        { CHAFSR_IERR, CHAFSR_IERR_msg },
        { CHAFSR_ISAP, CHAFSR_ISAP_msg },
        { CHAFSR_UCU, CHAFSR_UCU_msg },
        { CHAFSR_UCC, CHAFSR_UCC_msg },
        { CHAFSR_UE, CHAFSR_UE_msg },
        { CHAFSR_EDU, CHAFSR_EDU_msg },
        { CHAFSR_EMU, CHAFSR_EMU_msg },
        { CHAFSR_WDU, CHAFSR_WDU_msg },
        { CHAFSR_CPU, CHAFSR_CPU_msg },
        { CHAFSR_CE, CHAFSR_CE_msg },
        { CHAFSR_EDC, CHAFSR_EDC_msg },
        { CHAFSR_EMC, CHAFSR_EMC_msg },
        { CHAFSR_WDC, CHAFSR_WDC_msg },
        { CHAFSR_CPC, CHAFSR_CPC_msg },
        { CHAFSR_TO, CHAFSR_TO_msg },
        { CHAFSR_BERR, CHAFSR_BERR_msg },
        /* These two do not update the AFAR. */
        { CHAFSR_IVC, CHAFSR_IVC_msg },
        { CHAFSR_IVU, CHAFSR_IVU_msg },
        { 0, NULL },
};
static const char CHPAFSR_DTO_msg[] =
        "System bus unmapped error for prefetch/storequeue-read";
static const char CHPAFSR_DBERR_msg[] =
        "System bus error for prefetch/storequeue-read";
static const char CHPAFSR_THCE_msg[] =
        "Hardware corrected E-cache Tag ECC error";
static const char CHPAFSR_TSCE_msg[] =
        "SW handled correctable E-cache Tag ECC error";
static const char CHPAFSR_TUE_msg[] =
        "Uncorrectable E-cache Tag ECC error";
static const char CHPAFSR_DUE_msg[] =
        "System bus uncorrectable data ECC error due to prefetch/store-fill";
static struct afsr_error_table __cheetah_plus_error_table[] = {
        { CHAFSR_PERR, CHAFSR_PERR_msg },
        { CHAFSR_IERR, CHAFSR_IERR_msg },
        { CHAFSR_ISAP, CHAFSR_ISAP_msg },
        { CHAFSR_UCU, CHAFSR_UCU_msg },
        { CHAFSR_UCC, CHAFSR_UCC_msg },
        { CHAFSR_UE, CHAFSR_UE_msg },
        { CHAFSR_EDU, CHAFSR_EDU_msg },
        { CHAFSR_EMU, CHAFSR_EMU_msg },
        { CHAFSR_WDU, CHAFSR_WDU_msg },
        { CHAFSR_CPU, CHAFSR_CPU_msg },
        { CHAFSR_CE, CHAFSR_CE_msg },
        { CHAFSR_EDC, CHAFSR_EDC_msg },
        { CHAFSR_EMC, CHAFSR_EMC_msg },
        { CHAFSR_WDC, CHAFSR_WDC_msg },
        { CHAFSR_CPC, CHAFSR_CPC_msg },
        { CHAFSR_TO, CHAFSR_TO_msg },
        { CHAFSR_BERR, CHAFSR_BERR_msg },
        { CHPAFSR_DTO, CHPAFSR_DTO_msg },
        { CHPAFSR_DBERR, CHPAFSR_DBERR_msg },
        { CHPAFSR_THCE, CHPAFSR_THCE_msg },
        { CHPAFSR_TSCE, CHPAFSR_TSCE_msg },
        { CHPAFSR_TUE, CHPAFSR_TUE_msg },
        { CHPAFSR_DUE, CHPAFSR_DUE_msg },
        /* These two do not update the AFAR. */
        { CHAFSR_IVC, CHAFSR_IVC_msg },
        { CHAFSR_IVU, CHAFSR_IVU_msg },
        { 0, NULL },
};
static const char JPAFSR_JETO_msg[] =
        "System interface protocol error, hw timeout caused";
static const char JPAFSR_SCE_msg[] =
        "Parity error on system snoop results";
static const char JPAFSR_JEIC_msg[] =
        "System interface protocol error, illegal command detected";
static const char JPAFSR_JEIT_msg[] =
        "System interface protocol error, illegal ADTYPE detected";
static const char JPAFSR_OM_msg[] =
        "Out of range memory error has occurred";
static const char JPAFSR_ETP_msg[] =
        "Parity error on L2 cache tag SRAM";
static const char JPAFSR_UMS_msg[] =
        "Error due to unsupported store";
static const char JPAFSR_RUE_msg[] =
        "Uncorrectable ECC error from remote cache/memory";
static const char JPAFSR_RCE_msg[] =
        "Correctable ECC error from remote cache/memory";
static const char JPAFSR_BP_msg[] =
        "JBUS parity error on returned read data";
static const char JPAFSR_WBP_msg[] =
        "JBUS parity error on data for writeback or block store";
static const char JPAFSR_FRC_msg[] =
        "Foreign read to DRAM incurring correctable ECC error";
static const char JPAFSR_FRU_msg[] =
        "Foreign read to DRAM incurring uncorrectable ECC error";
static struct afsr_error_table __jalapeno_error_table[] = {
        { JPAFSR_JETO, JPAFSR_JETO_msg },
        { JPAFSR_SCE, JPAFSR_SCE_msg },
        { JPAFSR_JEIC, JPAFSR_JEIC_msg },
        { JPAFSR_JEIT, JPAFSR_JEIT_msg },
        { CHAFSR_PERR, CHAFSR_PERR_msg },
        { CHAFSR_IERR, CHAFSR_IERR_msg },
        { CHAFSR_ISAP, CHAFSR_ISAP_msg },
        { CHAFSR_UCU, CHAFSR_UCU_msg },
        { CHAFSR_UCC, CHAFSR_UCC_msg },
        { CHAFSR_UE, CHAFSR_UE_msg },
        { CHAFSR_EDU, CHAFSR_EDU_msg },
        { JPAFSR_OM, JPAFSR_OM_msg },
        { CHAFSR_WDU, CHAFSR_WDU_msg },
        { CHAFSR_CPU, CHAFSR_CPU_msg },
        { CHAFSR_CE, CHAFSR_CE_msg },
        { CHAFSR_EDC, CHAFSR_EDC_msg },
        { JPAFSR_ETP, JPAFSR_ETP_msg },
        { CHAFSR_WDC, CHAFSR_WDC_msg },
        { CHAFSR_CPC, CHAFSR_CPC_msg },
        { CHAFSR_TO, CHAFSR_TO_msg },
        { CHAFSR_BERR, CHAFSR_BERR_msg },
        { JPAFSR_UMS, JPAFSR_UMS_msg },
        { JPAFSR_RUE, JPAFSR_RUE_msg },
        { JPAFSR_RCE, JPAFSR_RCE_msg },
        { JPAFSR_BP, JPAFSR_BP_msg },
        { JPAFSR_WBP, JPAFSR_WBP_msg },
        { JPAFSR_FRC, JPAFSR_FRC_msg },
        { JPAFSR_FRU, JPAFSR_FRU_msg },
        /* This one does not update the AFAR. */
        { CHAFSR_IVU, CHAFSR_IVU_msg },
        { 0, NULL },
};
static struct afsr_error_table *cheetah_error_table;
static unsigned long cheetah_afsr_errors;

/* This is allocated at boot time based upon the largest hardware
 * cpu ID in the system.  We allocate two entries per cpu, one for
 * TL==0 logging and one for TL >= 1 logging.
 */
struct cheetah_err_info *cheetah_error_log;

static __inline__ struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
{
        struct cheetah_err_info *p;
        int cpu = smp_processor_id();

        if (!cheetah_error_log)
                return NULL;

        p = cheetah_error_log + (cpu * 2);
        if ((afsr & CHAFSR_TL1) != 0UL)
                p++;

        return p;
}

extern unsigned int tl0_icpe[], tl1_icpe[];
extern unsigned int tl0_dcpe[], tl1_dcpe[];
extern unsigned int tl0_fecc[], tl1_fecc[];
extern unsigned int tl0_cee[], tl1_cee[];
extern unsigned int tl0_iae[], tl1_iae[];
extern unsigned int tl0_dae[], tl1_dae[];
extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];

void __init cheetah_ecache_flush_init(void)
{
        unsigned long largest_size, smallest_linesize, order, ver;
        int node, i, instance;

        /* Scan all cpu device tree nodes, note two values:
         * 1) largest E-cache size
         * 2) smallest E-cache line size
         */
        largest_size = 0UL;
        smallest_linesize = ~0UL;

        instance = 0;
        while (!cpu_find_by_instance(instance, &node, NULL)) {
                unsigned long val;

                val = prom_getintdefault(node, "ecache-size",
                                         (2 * 1024 * 1024));
                if (val > largest_size)
                        largest_size = val;
                val = prom_getintdefault(node, "ecache-line-size", 64);
                if (val < smallest_linesize)
                        smallest_linesize = val;
                instance++;
        }

        if (largest_size == 0UL || smallest_linesize == ~0UL) {
                prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
                            "parameters.\n");
                prom_halt();
        }

        ecache_flush_size = (2 * largest_size);
        ecache_flush_linesize = smallest_linesize;

        ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);

        if (ecache_flush_physbase == ~0UL) {
                prom_printf("cheetah_ecache_flush_init: Cannot find %ld byte "
                            "contiguous physical memory.\n",
                            ecache_flush_size);
                prom_halt();
        }

        /* Now allocate error trap reporting scoreboard. */
        node = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
        for (order = 0; order < MAX_ORDER; order++) {
                if ((PAGE_SIZE << order) >= node)
                        break;
        }
        cheetah_error_log = (struct cheetah_err_info *)
                __get_free_pages(GFP_KERNEL, order);
        if (!cheetah_error_log) {
                prom_printf("cheetah_ecache_flush_init: Failed to allocate "
                            "error logging scoreboard (%d bytes).\n", node);
                prom_halt();
        }
        memset(cheetah_error_log, 0, PAGE_SIZE << order);

        /* Mark all AFSRs as invalid so that the trap handler will
         * log new information there.
         */
        for (i = 0; i < 2 * NR_CPUS; i++)
                cheetah_error_log[i].afsr = CHAFSR_INVALID;

        __asm__ ("rdpr %%ver, %0" : "=r" (ver));
        if ((ver >> 32) == __JALAPENO_ID ||
            (ver >> 32) == __SERRANO_ID) {
                cheetah_error_table = &__jalapeno_error_table[0];
                cheetah_afsr_errors = JPAFSR_ERRORS;
        } else if ((ver >> 32) == 0x003e0015) {
                cheetah_error_table = &__cheetah_plus_error_table[0];
                cheetah_afsr_errors = CHPAFSR_ERRORS;
        } else {
                cheetah_error_table = &__cheetah_error_table[0];
                cheetah_afsr_errors = CHAFSR_ERRORS;
        }

        /* Now patch trap tables. */
        memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
        memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
        memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
        memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
        memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
        memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
        memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
        memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
        if (tlb_type == cheetah_plus) {
                memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
                memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
                memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
                memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
        }
        flushi(PAGE_OFFSET);
}

static void cheetah_flush_ecache(void)
{
        unsigned long flush_base = ecache_flush_physbase;
        unsigned long flush_linesize = ecache_flush_linesize;
        unsigned long flush_size = ecache_flush_size;

        __asm__ __volatile__("1: subcc %0, %4, %0\n\t"
                             "   bne,pt %%xcc, 1b\n\t"
                             "    ldxa [%2 + %0] %3, %%g0\n\t"
                             : "=&r" (flush_size)
                             : "0" (flush_size), "r" (flush_base),
                               "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
}

static void cheetah_flush_ecache_line(unsigned long physaddr)
{
        unsigned long alias;

        physaddr &= ~(8UL - 1UL);
        physaddr = (ecache_flush_physbase +
                    (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
        alias = physaddr + (ecache_flush_size >> 1UL);
        __asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
                             "ldxa [%1] %2, %%g0\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "r" (physaddr), "r" (alias),
                               "i" (ASI_PHYS_USE_EC));
}

/* Unfortunately, the diagnostic access to the I-cache tags we need to
 * use to clear the thing interferes with I-cache coherency transactions.
 *
 * So we must only flush the I-cache when it is disabled.
 */
static void __cheetah_flush_icache(void)
{
        unsigned int icache_size, icache_line_size;
        unsigned long addr;

        icache_size = local_cpu_data().icache_size;
        icache_line_size = local_cpu_data().icache_line_size;

        /* Clear the valid bits in all the tags. */
        for (addr = 0; addr < icache_size; addr += icache_line_size) {
                __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                                     "membar #Sync"
                                     : /* no outputs */
                                     : "r" (addr | (2 << 3)),
                                       "i" (ASI_IC_TAG));
        }
}

static void cheetah_flush_icache(void)
{
        unsigned long dcu_save;

        /* Save current DCU, disable I-cache. */
        __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
                             "or %0, %2, %%g1\n\t"
                             "stxa %%g1, [%%g0] %1\n\t"
                             "membar #Sync"
                             : "=r" (dcu_save)
                             : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
                             : "g1");

        __cheetah_flush_icache();

        /* Restore DCU register */
        __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
}

static void cheetah_flush_dcache(void)
{
        unsigned int dcache_size, dcache_line_size;
        unsigned long addr;

        dcache_size = local_cpu_data().dcache_size;
        dcache_line_size = local_cpu_data().dcache_line_size;

        for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
                __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                                     "membar #Sync"
                                     : /* no outputs */
                                     : "r" (addr), "i" (ASI_DCACHE_TAG));
        }
}

/* In order to make the even parity correct we must do two things.
 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
 * Next, we clear out all 32-bytes of data for that line.  Data of
 * all-zero + tag parity value of zero == correct parity.
 */
static void cheetah_plus_zap_dcache_parity(void)
{
        unsigned int dcache_size, dcache_line_size;
        unsigned long addr;

        dcache_size = local_cpu_data().dcache_size;
        dcache_line_size = local_cpu_data().dcache_line_size;

        for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
                unsigned long tag = (addr >> 14);
                unsigned long line;

                __asm__ __volatile__("membar #Sync\n\t"
                                     "stxa %0, [%1] %2\n\t"
                                     "membar #Sync"
                                     : /* no outputs */
                                     : "r" (tag), "r" (addr),
                                       "i" (ASI_DCACHE_UTAG));
                for (line = addr; line < addr + dcache_line_size; line += 8)
                        __asm__ __volatile__("membar #Sync\n\t"
                                             "stxa %%g0, [%0] %1\n\t"
                                             "membar #Sync"
                                             : /* no outputs */
                                             : "r" (line),
                                               "i" (ASI_DCACHE_DATA));
        }
}

/* Conversion tables used to frob Cheetah AFSR syndrome values into
 * something palatable to the memory controller driver get_unumber
 * routine.
 */
#define MT0     137
#define MT1     138
#define MT2     139
#define NONE    254
#define MTC0    140
#define MTC1    141
#define MTC2    142
#define MTC3    143
#define C0      128
#define C1      129
#define C2      130
#define C3      131
#define C4      132
#define C5      133
#define C6      134
#define C7      135
#define C8      136
#define M2      144
#define M3      145
#define M4      146
#define M       147
static unsigned char cheetah_ecc_syntab[] = {
/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
};
static unsigned char cheetah_mtag_syntab[] = {
        NONE, MTC0,
        MTC1, NONE,
        MTC2, NONE,
        NONE, MT0,
        MTC3, NONE,
        NONE, MT1,
        NONE, MT2,
        NONE, NONE
};
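/* Sketch of the lookup these tables feed (see cheetah_log_errors()
 * below for the real thing):
 *
 *	int esynd = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
 *	chmc_getunumber(cheetah_ecc_syntab[esynd], afar,
 *			unum, sizeof(unum));
 *
 * M-tag syndromes index cheetah_mtag_syntab the same way via
 * CHAFSR_M_SYNDROME and CHAFSR_M_SYNDROME_SHIFT.
 */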

/* Return the highest priority error condition mentioned. */
static __inline__ unsigned long cheetah_get_hipri(unsigned long afsr)
{
        unsigned long tmp = 0;
        int i;

        for (i = 0; cheetah_error_table[i].mask; i++) {
                if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
                        return tmp;
        }
        return tmp;
}

static const char *cheetah_get_string(unsigned long bit)
{
        int i;

        for (i = 0; cheetah_error_table[i].mask; i++) {
                if ((bit & cheetah_error_table[i].mask) != 0UL)
                        return cheetah_error_table[i].name;
        }
        return "???";
}

extern int chmc_getunumber(int, unsigned long, char *, int);

static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
                               unsigned long afsr, unsigned long afar, int recoverable)
{
        unsigned long hipri;
        char unum[256];

        printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               afsr, afar,
               (afsr & CHAFSR_TL1) ? 1 : 0);
        printk("%s" "ERROR(%d): TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               regs->tpc, regs->tnpc, regs->tstate);
        printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
               (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
               (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
               (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
        hipri = cheetah_get_hipri(afsr);
        printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               hipri, cheetah_get_string(hipri));

        /* Try to get unumber if relevant. */
#define ESYND_ERRORS    (CHAFSR_IVC | CHAFSR_IVU | \
                         CHAFSR_CPC | CHAFSR_CPU | \
                         CHAFSR_UE  | CHAFSR_CE  | \
                         CHAFSR_EDC | CHAFSR_EDU | \
                         CHAFSR_UCC | CHAFSR_UCU | \
                         CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS    (CHAFSR_EMC | CHAFSR_EMU)
        if (afsr & ESYND_ERRORS) {
                int syndrome;
                int ret;

                syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
                syndrome = cheetah_ecc_syntab[syndrome];
                ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
                if (ret != -1)
                        printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
                               (recoverable ? KERN_WARNING : KERN_CRIT),
                               smp_processor_id(), unum);
        } else if (afsr & MSYND_ERRORS) {
                int syndrome;
                int ret;

                syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
                syndrome = cheetah_mtag_syntab[syndrome];
                ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
                if (ret != -1)
                        printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
                               (recoverable ? KERN_WARNING : KERN_CRIT),
                               smp_processor_id(), unum);
        }

        /* Now dump the cache snapshots. */
        printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               (int) info->dcache_index,
               info->dcache_tag,
               info->dcache_utag,
               info->dcache_stag);
        printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               info->dcache_data[0],
               info->dcache_data[1],
               info->dcache_data[2],
               info->dcache_data[3]);
        printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
               "u[%016lx] l[%016lx]\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               (int) info->icache_index,
               info->icache_tag,
               info->icache_utag,
               info->icache_stag,
               info->icache_upper,
               info->icache_lower);
        printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               info->icache_data[0],
               info->icache_data[1],
               info->icache_data[2],
               info->icache_data[3]);
        printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               info->icache_data[4],
               info->icache_data[5],
               info->icache_data[6],
               info->icache_data[7]);
        printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               (int) info->ecache_index, info->ecache_tag);
        printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               info->ecache_data[0],
               info->ecache_data[1],
               info->ecache_data[2],
               info->ecache_data[3]);

        afsr = (afsr & ~hipri) & cheetah_afsr_errors;
        while (afsr != 0UL) {
                unsigned long bit = cheetah_get_hipri(afsr);

                printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
                       (recoverable ? KERN_WARNING : KERN_CRIT),
                       bit, cheetah_get_string(bit));

                afsr &= ~bit;
        }

        if (!recoverable)
                printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}

static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
        unsigned long afsr, afar;
        int ret = 0;

        __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
                             : "=r" (afsr)
                             : "i" (ASI_AFSR));
        if ((afsr & cheetah_afsr_errors) != 0) {
                if (logp != NULL) {
                        __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
                                             : "=r" (afar)
                                             : "i" (ASI_AFAR));
                        logp->afsr = afsr;
                        logp->afar = afar;
                }
                ret = 1;
        }
        __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
                             "membar #Sync\n\t"
                             : : "r" (afsr), "i" (ASI_AFSR));

        return ret;
}

void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
        struct cheetah_err_info local_snapshot, *p;
        int recoverable;

        /* Flush E-cache */
        cheetah_flush_ecache();

        p = cheetah_get_error_log(afsr);
        if (!p) {
                prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
                            afsr, afar);
                prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
                            smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
                prom_halt();
        }

        /* Grab snapshot of logged error. */
        memcpy(&local_snapshot, p, sizeof(local_snapshot));

        /* If the current trap snapshot does not match what the
         * trap handler passed along into our args, big trouble.
         * In such a case, mark the local copy as invalid.
         *
         * Else, it matches and we mark the afsr in the non-local
         * copy as invalid so we may log new error traps there.
         */
        if (p->afsr != afsr || p->afar != afar)
                local_snapshot.afsr = CHAFSR_INVALID;
        else
                p->afsr = CHAFSR_INVALID;

        cheetah_flush_icache();
        cheetah_flush_dcache();

        /* Re-enable I-cache/D-cache */
        __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
                             "or %%g1, %1, %%g1\n\t"
                             "stxa %%g1, [%%g0] %0\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "i" (ASI_DCU_CONTROL_REG),
                               "i" (DCU_DC | DCU_IC)
                             : "g1");

        /* Re-enable error reporting */
        __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
                             "or %%g1, %1, %%g1\n\t"
                             "stxa %%g1, [%%g0] %0\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "i" (ASI_ESTATE_ERROR_EN),
                               "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
                             : "g1");

        /* Decide if we can continue after handling this trap and
         * logging the error.
         */
        recoverable = 1;
        if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
                recoverable = 0;

        /* Re-check AFSR/AFAR.  What we are looking for here is whether a new
         * error was logged while we had error reporting traps disabled.
         */
        if (cheetah_recheck_errors(&local_snapshot)) {
                unsigned long new_afsr = local_snapshot.afsr;

                /* If we got a new asynchronous error, die... */
                if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
                                CHAFSR_WDU | CHAFSR_CPU |
                                CHAFSR_IVU | CHAFSR_UE |
                                CHAFSR_BERR | CHAFSR_TO))
                        recoverable = 0;
        }

        /* Log errors. */
        cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

        if (!recoverable)
                panic("Irrecoverable Fast-ECC error trap.\n");

        /* Flush E-cache to kick the error trap handlers out. */
        cheetah_flush_ecache();
}

/* Try to fix a correctable error by pushing the line out from
 * the E-cache.  Recheck error reporting registers to see if the
 * problem is intermittent.
 */
static int cheetah_fix_ce(unsigned long physaddr)
{
        unsigned long orig_estate;
        unsigned long alias1, alias2;
        int ret;

        /* Make sure correctable error traps are disabled. */
        __asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
                             "andn %0, %1, %%g1\n\t"
                             "stxa %%g1, [%%g0] %2\n\t"
                             "membar #Sync"
                             : "=&r" (orig_estate)
                             : "i" (ESTATE_ERROR_CEEN),
                               "i" (ASI_ESTATE_ERROR_EN)
                             : "g1");

        /* We calculate alias addresses that will force the
         * cache line in question out of the E-cache.  Then
         * we bring it back in with an atomic instruction so
         * that we get it in some modified/exclusive state,
         * then we displace it again to try and get proper ECC
         * pushed back into the system.
         */
        physaddr &= ~(8UL - 1UL);
        alias1 = (ecache_flush_physbase +
                  (physaddr & ((ecache_flush_size >> 1) - 1)));
        alias2 = alias1 + (ecache_flush_size >> 1);
        __asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
                             "ldxa [%1] %3, %%g0\n\t"
                             "casxa [%2] %3, %%g0, %%g0\n\t"
                             "membar #StoreLoad | #StoreStore\n\t"
                             "ldxa [%0] %3, %%g0\n\t"
                             "ldxa [%1] %3, %%g0\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "r" (alias1), "r" (alias2),
                               "r" (physaddr), "i" (ASI_PHYS_USE_EC));

        /* Did that trigger another error? */
        if (cheetah_recheck_errors(NULL)) {
                /* Try one more time. */
                __asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
                                     "membar #Sync"
                                     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
                if (cheetah_recheck_errors(NULL))
                        ret = 2;
                else
                        ret = 1;
        } else {
                /* No new error, intermittent problem. */
                ret = 0;
        }

        /* Restore error enables. */
        __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
                             "membar #Sync"
                             : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));

        return ret;
}

/* Return non-zero if PADDR is a valid physical memory address. */
static int cheetah_check_main_memory(unsigned long paddr)
{
        unsigned long vaddr = PAGE_OFFSET + paddr;

        if (vaddr > (unsigned long) high_memory)
                return 0;

        return kern_addr_valid(vaddr);
}

void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
        struct cheetah_err_info local_snapshot, *p;
        int recoverable, is_memory;

        p = cheetah_get_error_log(afsr);
        if (!p) {
                prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
                            afsr, afar);
                prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
                            smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
                prom_halt();
        }

        /* Grab snapshot of logged error. */
        memcpy(&local_snapshot, p, sizeof(local_snapshot));

        /* If the current trap snapshot does not match what the
         * trap handler passed along into our args, big trouble.
         * In such a case, mark the local copy as invalid.
         *
         * Else, it matches and we mark the afsr in the non-local
         * copy as invalid so we may log new error traps there.
         */
        if (p->afsr != afsr || p->afar != afar)
                local_snapshot.afsr = CHAFSR_INVALID;
        else
                p->afsr = CHAFSR_INVALID;

        is_memory = cheetah_check_main_memory(afar);

        if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
                /* XXX Might want to log the results of this operation
                 * XXX somewhere... -DaveM
                 */
                cheetah_fix_ce(afar);
        }

        {
                int flush_all, flush_line;

                flush_all = flush_line = 0;
                if ((afsr & CHAFSR_EDC) != 0UL) {
                        if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
                                flush_line = 1;
                        else
                                flush_all = 1;
                } else if ((afsr & CHAFSR_CPC) != 0UL) {
                        if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
                                flush_line = 1;
                        else
                                flush_all = 1;
                }

                /* Trap handler only disabled I-cache, flush it. */
                cheetah_flush_icache();

                /* Re-enable I-cache */
                __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
                                     "or %%g1, %1, %%g1\n\t"
                                     "stxa %%g1, [%%g0] %0\n\t"
                                     "membar #Sync"
                                     : /* no outputs */
                                     : "i" (ASI_DCU_CONTROL_REG),
                                       "i" (DCU_IC)
                                     : "g1");

                if (flush_all)
                        cheetah_flush_ecache();
                else if (flush_line)
                        cheetah_flush_ecache_line(afar);
        }

        /* Re-enable error reporting */
        __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
                             "or %%g1, %1, %%g1\n\t"
                             "stxa %%g1, [%%g0] %0\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "i" (ASI_ESTATE_ERROR_EN),
                               "i" (ESTATE_ERROR_CEEN)
                             : "g1");

        /* Decide if we can continue after handling this trap and
         * logging the error.
         */
        recoverable = 1;
        if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
                recoverable = 0;

        /* Re-check AFSR/AFAR */
        (void) cheetah_recheck_errors(&local_snapshot);

        /* Log errors. */
        cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

        if (!recoverable)
                panic("Irrecoverable Correctable-ECC error trap.\n");
}

void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
        struct cheetah_err_info local_snapshot, *p;
        int recoverable, is_memory;

#ifdef CONFIG_PCI
        /* Check for the special PCI poke sequence. */
        if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
                cheetah_flush_icache();
                cheetah_flush_dcache();

                /* Re-enable I-cache/D-cache */
                __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
                                     "or %%g1, %1, %%g1\n\t"
                                     "stxa %%g1, [%%g0] %0\n\t"
                                     "membar #Sync"
                                     : /* no outputs */
                                     : "i" (ASI_DCU_CONTROL_REG),
                                       "i" (DCU_DC | DCU_IC)
                                     : "g1");

                /* Re-enable error reporting */
                __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
                                     "or %%g1, %1, %%g1\n\t"
                                     "stxa %%g1, [%%g0] %0\n\t"
                                     "membar #Sync"
                                     : /* no outputs */
                                     : "i" (ASI_ESTATE_ERROR_EN),
                                       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
                                     : "g1");

                (void) cheetah_recheck_errors(NULL);

                pci_poke_faulted = 1;
                regs->tpc += 4;
                regs->tnpc = regs->tpc + 4;
                return;
        }
#endif

        p = cheetah_get_error_log(afsr);
        if (!p) {
                prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
                            afsr, afar);
                prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
                            smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
                prom_halt();
        }

        /* Grab snapshot of logged error. */
        memcpy(&local_snapshot, p, sizeof(local_snapshot));

        /* If the current trap snapshot does not match what the
         * trap handler passed along into our args, big trouble.
         * In such a case, mark the local copy as invalid.
         *
         * Else, it matches and we mark the afsr in the non-local
         * copy as invalid so we may log new error traps there.
         */
        if (p->afsr != afsr || p->afar != afar)
                local_snapshot.afsr = CHAFSR_INVALID;
        else
                p->afsr = CHAFSR_INVALID;

        is_memory = cheetah_check_main_memory(afar);

        {
                int flush_all, flush_line;

                flush_all = flush_line = 0;
                if ((afsr & CHAFSR_EDU) != 0UL) {
                        if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
                                flush_line = 1;
                        else
                                flush_all = 1;
                } else if ((afsr & CHAFSR_BERR) != 0UL) {
                        if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
                                flush_line = 1;
                        else
                                flush_all = 1;
                }

                cheetah_flush_icache();
                cheetah_flush_dcache();

                /* Re-enable I/D caches */
                __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
                                     "or %%g1, %1, %%g1\n\t"
                                     "stxa %%g1, [%%g0] %0\n\t"
                                     "membar #Sync"
                                     : /* no outputs */
                                     : "i" (ASI_DCU_CONTROL_REG),
                                       "i" (DCU_IC | DCU_DC)
                                     : "g1");

                if (flush_all)
                        cheetah_flush_ecache();
                else if (flush_line)
                        cheetah_flush_ecache_line(afar);
        }

        /* Re-enable error reporting */
        __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
                             "or %%g1, %1, %%g1\n\t"
                             "stxa %%g1, [%%g0] %0\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "i" (ASI_ESTATE_ERROR_EN),
                               "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
                             : "g1");

        /* Decide if we can continue after handling this trap and
         * logging the error.
         */
        recoverable = 1;
        if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
                recoverable = 0;

        /* Re-check AFSR/AFAR.  What we are looking for here is whether a new
         * error was logged while we had error reporting traps disabled.
         */
        if (cheetah_recheck_errors(&local_snapshot)) {
                unsigned long new_afsr = local_snapshot.afsr;

                /* If we got a new asynchronous error, die... */
                if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
                                CHAFSR_WDU | CHAFSR_CPU |
                                CHAFSR_IVU | CHAFSR_UE |
                                CHAFSR_BERR | CHAFSR_TO))
                        recoverable = 0;
        }

        /* Log errors. */
        cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

        /* "Recoverable" here means we try to yank the page from ever
         * being newly used again.  This depends upon a few things:
         * 1) Must be main memory, and AFAR must be valid.
         * 2) If we trapped from user, OK.
         * 3) Else, if we trapped from kernel we must find exception
         *    table entry (ie. we have to have been accessing user
         *    space).
         *
         * If AFAR is not in main memory, or we trapped from kernel
         * and cannot find an exception table entry, it is unacceptable
         * to try and continue.
         */
        if (recoverable && is_memory) {
                if ((regs->tstate & TSTATE_PRIV) == 0UL) {
                        /* OK, usermode access. */
                        recoverable = 1;
                } else {
                        const struct exception_table_entry *entry;

                        entry = search_exception_tables(regs->tpc);
                        if (entry) {
                                /* OK, kernel access to userspace. */
                                recoverable = 1;
                        } else {
                                /* BAD, privileged state is corrupted. */
                                recoverable = 0;
                        }

                        if (recoverable) {
                                if (pfn_valid(afar >> PAGE_SHIFT))
                                        get_page(pfn_to_page(afar >> PAGE_SHIFT));
                                else
                                        recoverable = 0;

                                /* Only perform fixup if we still have a
                                 * recoverable condition.
                                 */
                                if (recoverable) {
                                        regs->tpc = entry->fixup;
                                        regs->tnpc = regs->tpc + 4;
                                }
                        }
                }
        } else {
                recoverable = 0;
        }

        if (!recoverable)
                panic("Irrecoverable deferred error trap.\n");
}

/* Handle a D/I cache parity error trap.  TYPE is encoded as:
 *
 * Bit0: 0=dcache,1=icache
 * Bit1: 0=recoverable,1=unrecoverable
 *
 * The hardware has disabled both the I-cache and D-cache in
 * the %dcr register.
 */
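/* So, for example, TYPE == 0x3 is an unrecoverable I-cache parity
 * error, while TYPE == 0x0 is a recoverable D-cache one.
 */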
1719 void cheetah_plus_parity_error(int type, struct pt_regs *regs)
1720 {
1721 if (type & 0x1)
1722 __cheetah_flush_icache();
1723 else
1724 cheetah_plus_zap_dcache_parity();
1725 cheetah_flush_dcache();
1726
1727 /* Re-enable I-cache/D-cache */
1728 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1729 "or %%g1, %1, %%g1\n\t"
1730 "stxa %%g1, [%%g0] %0\n\t"
1731 "membar #Sync"
1732 : /* no outputs */
1733 : "i" (ASI_DCU_CONTROL_REG),
1734 "i" (DCU_DC | DCU_IC)
1735 : "g1");
1736
1737 if (type & 0x2) {
1738 printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1739 smp_processor_id(),
1740 (type & 0x1) ? 'I' : 'D',
1741 regs->tpc);
1742 panic("Irrecoverable Cheetah+ parity error.");
1743 }
1744
1745 printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1746 smp_processor_id(),
1747 (type & 0x1) ? 'I' : 'D',
1748 regs->tpc);
1749 }
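
/* The TYPE encoding documented above packs two flags into the low bits.
 * A minimal sketch of a decoder (hypothetical helper, illustration only):
 */
static inline void sketch_decode_parity_type(int type, int *is_icache,
					     int *is_unrecoverable)
{
	*is_icache = type & 0x1;		/* Bit0: 0=dcache, 1=icache */
	*is_unrecoverable = (type >> 1) & 0x1;	/* Bit1: 0=recoverable */
}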
1750
1751 struct sun4v_error_entry {
1752 u64 err_handle;
1753 u64 err_stick;
1754
1755 u32 err_type;
1756 #define SUN4V_ERR_TYPE_UNDEFINED 0
1757 #define SUN4V_ERR_TYPE_UNCORRECTED_RES 1
1758 #define SUN4V_ERR_TYPE_PRECISE_NONRES 2
1759 #define SUN4V_ERR_TYPE_DEFERRED_NONRES 3
1760 #define SUN4V_ERR_TYPE_WARNING_RES 4
1761
1762 u32 err_attrs;
1763 #define SUN4V_ERR_ATTRS_PROCESSOR 0x00000001
1764 #define SUN4V_ERR_ATTRS_MEMORY 0x00000002
1765 #define SUN4V_ERR_ATTRS_PIO 0x00000004
1766 #define SUN4V_ERR_ATTRS_INT_REGISTERS 0x00000008
1767 #define SUN4V_ERR_ATTRS_FPU_REGISTERS 0x00000010
1768 #define SUN4V_ERR_ATTRS_USER_MODE 0x01000000
1769 #define SUN4V_ERR_ATTRS_PRIV_MODE 0x02000000
1770 #define SUN4V_ERR_ATTRS_RES_QUEUE_FULL 0x80000000
1771
1772 u64 err_raddr;
1773 u32 err_size;
1774 u16 err_cpu;
1775 u16 err_pad;
1776 };
1777
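/* With natural alignment the layout above is fully determined:
 * err_raddr lands at byte offset 24 and the whole record is 40 bytes.
 * A sketch of pinning that down at compile time, assuming the
 * BUILD_BUG_ON() macro from <linux/kernel.h> is available in this tree
 * (the checks are illustrative, not taken from the hypervisor ABI spec):
 */
static inline void sun4v_error_entry_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct sun4v_error_entry, err_raddr) != 24);
	BUILD_BUG_ON(sizeof(struct sun4v_error_entry) != 40);
}
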
1778 static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
1779 static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
1780
1781 static const char *sun4v_err_type_to_str(u32 type)
1782 {
1783 switch (type) {
1784 case SUN4V_ERR_TYPE_UNDEFINED:
1785 return "undefined";
1786 case SUN4V_ERR_TYPE_UNCORRECTED_RES:
1787 return "uncorrected resumable";
1788 case SUN4V_ERR_TYPE_PRECISE_NONRES:
1789 return "precise nonresumable";
1790 case SUN4V_ERR_TYPE_DEFERRED_NONRES:
1791 return "deferred nonresumable";
1792 case SUN4V_ERR_TYPE_WARNING_RES:
1793 return "warning resumable";
1794 default:
1795 return "unknown";
1796 }
1797 }
1798
1799 static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
1800 {
1801 int cnt;
1802
1803 printk("%s: Reporting on cpu %d\n", pfx, cpu);
1804 printk("%s: err_handle[%lx] err_stick[%lx] err_type[%08x:%s]\n",
1805 pfx,
1806 ent->err_handle, ent->err_stick,
1807 ent->err_type,
1808 sun4v_err_type_to_str(ent->err_type));
1809 printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n",
1810 pfx,
1811 ent->err_attrs,
1812 ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ?
1813 "processor" : ""),
1814 ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ?
1815 "memory" : ""),
1816 ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ?
1817 "pio" : ""),
1818 ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ?
1819 "integer-regs" : ""),
1820 ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ?
1821 "fpu-regs" : ""),
1822 ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ?
1823 "user" : ""),
1824 ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ?
1825 "privileged" : ""),
1826 ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ?
1827 "queue-full" : ""));
1828 printk("%s: err_raddr[%016lx] err_size[%u] err_cpu[%u]\n",
1829 pfx,
1830 ent->err_raddr, ent->err_size, ent->err_cpu);
1831
1832 if ((cnt = atomic_read(ocnt)) != 0) {
1833 atomic_set(ocnt, 0);
1834 wmb();
1835 printk("%s: Queue overflowed %d times.\n",
1836 pfx, cnt);
1837 }
1838 }
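
/* Note the window in the read-and-reset sequence above: an overflow
 * trap that fires between atomic_read() and atomic_set() is silently
 * lost.  That is harmless here since the count is purely informational,
 * but a lossless variant is possible with an atomic exchange.  A sketch
 * (hypothetical helper, illustration only):
 */
static inline int sketch_drain_overflow_count(atomic_t *ocnt)
{
	/* xchg() grabs the current count and zeroes it in one step. */
	return xchg(&ocnt->counter, 0);
}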
1839
1840 /* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
1841 * Log the event and clear the first word of the entry.
1842 */
1843 void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
1844 {
1845 struct sun4v_error_entry *ent, local_copy;
1846 struct trap_per_cpu *tb;
1847 unsigned long paddr;
1848 int cpu;
1849
1850 cpu = get_cpu();
1851
1852 tb = &trap_block[cpu];
1853 paddr = tb->resum_kernel_buf_pa + offset;
1854 ent = __va(paddr);
1855
1856 memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
1857
1858 /* We have a local copy now, so release the entry. */
1859 ent->err_handle = 0;
1860 wmb();
1861
1862 put_cpu();
1863
1864 sun4v_log_error(&local_copy, cpu,
1865 KERN_ERR "RESUMABLE ERROR",
1866 &sun4v_resum_oflow_cnt);
1867 }
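
/* Both the resumable and non-resumable paths use the same claim/release
 * protocol on the per-cpu queue entry: snapshot it into a local copy,
 * then clear err_handle to hand the slot back, with wmb() pushing the
 * release out before this cpu moves on.  A condensed sketch
 * (hypothetical helper, illustration only):
 */
static inline void sketch_claim_error_entry(struct sun4v_error_entry *ent,
					    struct sun4v_error_entry *local)
{
	memcpy(local, ent, sizeof(*local));

	/* The local copy is complete, release the slot. */
	ent->err_handle = 0;
	wmb();
}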
1868
1869 /* If we try to printk() we'll probably make matters worse, by trying
1870 * to retake locks this cpu already holds or causing more errors. So
1871 * just bump a counter, and we'll report these counter bumps above.
1872 */
1873 void sun4v_resum_overflow(struct pt_regs *regs)
1874 {
1875 atomic_inc(&sun4v_resum_oflow_cnt);
1876 }
1877
1878 /* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
1879 * Log the event, clear the first word of the entry, and die.
1880 */
1881 void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
1882 {
1883 struct sun4v_error_entry *ent, local_copy;
1884 struct trap_per_cpu *tb;
1885 unsigned long paddr;
1886 int cpu;
1887
1888 cpu = get_cpu();
1889
1890 tb = &trap_block[cpu];
1891 paddr = tb->nonresum_kernel_buf_pa + offset;
1892 ent = __va(paddr);
1893
1894 memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
1895
1896 /* We have a local copy now, so release the entry. */
1897 ent->err_handle = 0;
1898 wmb();
1899
1900 put_cpu();
1901
1902 #ifdef CONFIG_PCI
1903 /* Check for the special PCI poke sequence. */
1904 if (pci_poke_in_progress && pci_poke_cpu == cpu) {
1905 pci_poke_faulted = 1;
1906 regs->tpc += 4;
1907 regs->tnpc = regs->tpc + 4;
1908 return;
1909 }
1910 #endif
1911
1912 sun4v_log_error(&local_copy, cpu,
1913 KERN_EMERG "NON-RESUMABLE ERROR",
1914 &sun4v_nonresum_oflow_cnt);
1915
1916 panic("Non-resumable error.");
1917 }
1918
1919 /* If we try to printk() we'll probably make matters worse, by trying
1920 * to retake locks this cpu already holds or causing more errors. So
1921 * just bump a counter, and we'll report these counter bumps above.
1922 */
1923 void sun4v_nonresum_overflow(struct pt_regs *regs)
1924 {
1925 /* XXX Even this may not make much sense. Perhaps we should just
1926 * XXX pull the plug and panic directly from here?
1927 */
1928 atomic_inc(&sun4v_nonresum_oflow_cnt);
1929 }
1930
1931 unsigned long sun4v_err_itlb_vaddr;
1932 unsigned long sun4v_err_itlb_ctx;
1933 unsigned long sun4v_err_itlb_pte;
1934 unsigned long sun4v_err_itlb_error;
1935
1936 void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
1937 {
1938 if (tl > 1)
1939 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1940
1941 printk("SUN4V-ITLB: Error at TPC[%lx], tl %d\n", regs->tpc, tl);
1942 printk("SUN4V-ITLB: vaddr[%lx] ctx[%lx] pte[%lx] error[%lx]\n",
1943 sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
1944 sun4v_err_itlb_pte, sun4v_err_itlb_error);
1945 prom_halt();
1946 }
1947
1948 unsigned long sun4v_err_dtlb_vaddr;
1949 unsigned long sun4v_err_dtlb_ctx;
1950 unsigned long sun4v_err_dtlb_pte;
1951 unsigned long sun4v_err_dtlb_error;
1952
1953 void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
1954 {
1955 if (tl > 1)
1956 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1957
1958 printk("SUN4V-DTLB: Error at TPC[%lx], tl %d\n", regs->tpc, tl);
1959 printk("SUN4V-DTLB: vaddr[%lx] ctx[%lx] pte[%lx] error[%lx]\n",
1960 sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
1961 sun4v_err_dtlb_pte, sun4v_err_dtlb_error);
1962 prom_halt();
1963 }
1964
1965 void do_fpe_common(struct pt_regs *regs)
1966 {
1967 if (regs->tstate & TSTATE_PRIV) {
1968 regs->tpc = regs->tnpc;
1969 regs->tnpc += 4;
1970 } else {
1971 unsigned long fsr = current_thread_info()->xfsr[0];
1972 siginfo_t info;
1973
1974 if (test_thread_flag(TIF_32BIT)) {
1975 regs->tpc &= 0xffffffff;
1976 regs->tnpc &= 0xffffffff;
1977 }
1978 info.si_signo = SIGFPE;
1979 info.si_errno = 0;
1980 info.si_addr = (void __user *)regs->tpc;
1981 info.si_trapno = 0;
1982 info.si_code = __SI_FAULT;
1983 if ((fsr & 0x1c000) == (1 << 14)) {
1984 if (fsr & 0x10)
1985 info.si_code = FPE_FLTINV;
1986 else if (fsr & 0x08)
1987 info.si_code = FPE_FLTOVF;
1988 else if (fsr & 0x04)
1989 info.si_code = FPE_FLTUND;
1990 else if (fsr & 0x02)
1991 info.si_code = FPE_FLTDIV;
1992 else if (fsr & 0x01)
1993 info.si_code = FPE_FLTRES;
1994 }
1995 force_sig_info(SIGFPE, &info, current);
1996 }
1997 }
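
/* The %fsr decode above follows the SPARC V9 layout: bits 16:14 hold
 * the floating-point trap type (ftt), and an ftt of 1
 * (IEEE_754_exception) means the current-exception field in bits 4:0
 * names the cause.  The same mapping, isolated into a sketch
 * (hypothetical helper, illustration only):
 */
static inline int sketch_fsr_to_si_code(unsigned long fsr)
{
	if ((fsr & 0x1c000) != (1 << 14))
		return __SI_FAULT;	/* ftt != IEEE_754_exception */
	if (fsr & 0x10)
		return FPE_FLTINV;	/* nvc: invalid operation */
	if (fsr & 0x08)
		return FPE_FLTOVF;	/* ofc: overflow */
	if (fsr & 0x04)
		return FPE_FLTUND;	/* ufc: underflow */
	if (fsr & 0x02)
		return FPE_FLTDIV;	/* dzc: division by zero */
	if (fsr & 0x01)
		return FPE_FLTRES;	/* nxc: inexact result */
	return __SI_FAULT;
}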
1998
1999 void do_fpieee(struct pt_regs *regs)
2000 {
2001 if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
2002 0, 0x24, SIGFPE) == NOTIFY_STOP)
2003 return;
2004
2005 do_fpe_common(regs);
2006 }
2007
2008 extern int do_mathemu(struct pt_regs *, struct fpustate *);
2009
2010 void do_fpother(struct pt_regs *regs)
2011 {
2012 struct fpustate *f = FPUSTATE;
2013 int ret = 0;
2014
2015 if (notify_die(DIE_TRAP, "fpu exception other", regs,
2016 0, 0x25, SIGFPE) == NOTIFY_STOP)
2017 return;
2018
2019 switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
2020 case (2 << 14): /* unfinished_FPop */
2021 case (3 << 14): /* unimplemented_FPop */
2022 ret = do_mathemu(regs, f);
2023 break;
2024 }
2025 if (ret)
2026 return;
2027 do_fpe_common(regs);
2028 }
2029
2030 void do_tof(struct pt_regs *regs)
2031 {
2032 siginfo_t info;
2033
2034 if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
2035 0, 0x26, SIGEMT) == NOTIFY_STOP)
2036 return;
2037
2038 if (regs->tstate & TSTATE_PRIV)
2039 die_if_kernel("Penguin overflow trap from kernel mode", regs);
2040 if (test_thread_flag(TIF_32BIT)) {
2041 regs->tpc &= 0xffffffff;
2042 regs->tnpc &= 0xffffffff;
2043 }
2044 info.si_signo = SIGEMT;
2045 info.si_errno = 0;
2046 info.si_code = EMT_TAGOVF;
2047 info.si_addr = (void __user *)regs->tpc;
2048 info.si_trapno = 0;
2049 force_sig_info(SIGEMT, &info, current);
2050 }
2051
2052 void do_div0(struct pt_regs *regs)
2053 {
2054 siginfo_t info;
2055
2056 if (notify_die(DIE_TRAP, "integer division by zero", regs,
2057 0, 0x28, SIGFPE) == NOTIFY_STOP)
2058 return;
2059
2060 if (regs->tstate & TSTATE_PRIV)
2061 die_if_kernel("TL0: Kernel divide by zero.", regs);
2062 if (test_thread_flag(TIF_32BIT)) {
2063 regs->tpc &= 0xffffffff;
2064 regs->tnpc &= 0xffffffff;
2065 }
2066 info.si_signo = SIGFPE;
2067 info.si_errno = 0;
2068 info.si_code = FPE_INTDIV;
2069 info.si_addr = (void __user *)regs->tpc;
2070 info.si_trapno = 0;
2071 force_sig_info(SIGFPE, &info, current);
2072 }
2073
2074 void instruction_dump (unsigned int *pc)
2075 {
2076 int i;
2077
2078 if ((((unsigned long) pc) & 3))
2079 return;
2080
2081 printk("Instruction DUMP:");
2082 for (i = -3; i < 6; i++)
2083 printk("%c%08x%c",i?' ':'<',pc[i],i?' ':'>');
2084 printk("\n");
2085 }
2086
2087 static void user_instruction_dump (unsigned int __user *pc)
2088 {
2089 int i;
2090 unsigned int buf[9];
2091
2092 if ((((unsigned long) pc) & 3))
2093 return;
2094
2095 if (copy_from_user(buf, pc - 3, sizeof(buf)))
2096 return;
2097
2098 printk("Instruction DUMP:");
2099 for (i = 0; i < 9; i++)
2100 printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>');
2101 printk("\n");
2102 }
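
/* Both dumpers share one convention: the faulting word is printed as
 * <xxxxxxxx>, its neighbours padded with spaces.  A sketch of that
 * formatting in isolation (hypothetical helper, illustration only):
 */
static inline void sketch_dump_one_insn(unsigned int word, int is_fault_pc)
{
	printk("%c%08x%c",
	       is_fault_pc ? '<' : ' ',
	       word,
	       is_fault_pc ? '>' : ' ');
}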
2103
2104 void show_stack(struct task_struct *tsk, unsigned long *_ksp)
2105 {
2106 unsigned long pc, fp, thread_base, ksp;
2107 void *tp = task_stack_page(tsk);
2108 struct reg_window *rw;
2109 int count = 0;
2110
2111 ksp = (unsigned long) _ksp;
2112
2113 if (tp == current_thread_info())
2114 flushw_all();
2115
2116 fp = ksp + STACK_BIAS;
2117 thread_base = (unsigned long) tp;
2118
2119 printk("Call Trace:");
2120 #ifdef CONFIG_KALLSYMS
2121 printk("\n");
2122 #endif
2123 do {
2124 /* Bogus frame pointer? */
2125 if (fp < (thread_base + sizeof(struct thread_info)) ||
2126 fp >= (thread_base + THREAD_SIZE))
2127 break;
2128 rw = (struct reg_window *)fp;
2129 pc = rw->ins[7];
2130 printk(" [%016lx] ", pc);
2131 print_symbol("%s\n", pc);
2132 fp = rw->ins[6] + STACK_BIAS;
2133 } while (++count < 16);
2134 #ifndef CONFIG_KALLSYMS
2135 printk("\n");
2136 #endif
2137 }
2138
2139 void dump_stack(void)
2140 {
2141 unsigned long *ksp;
2142
2143 __asm__ __volatile__("mov %%fp, %0"
2144 : "=r" (ksp));
2145 show_stack(current, ksp);
2146 }
2147
2148 EXPORT_SYMBOL(dump_stack);
2149
2150 static inline int is_kernel_stack(struct task_struct *task,
2151 struct reg_window *rw)
2152 {
2153 unsigned long rw_addr = (unsigned long) rw;
2154 unsigned long thread_base, thread_end;
2155
2156 if (rw_addr < PAGE_OFFSET) {
2157 if (task != &init_task)
2158 return 0;
2159 }
2160
2161 thread_base = (unsigned long) task_stack_page(task);
2162 thread_end = thread_base + sizeof(union thread_union);
2163 if (rw_addr >= thread_base &&
2164 rw_addr < thread_end &&
2165 !(rw_addr & 0x7UL))
2166 return 1;
2167
2168 return 0;
2169 }
2170
2171 static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
2172 {
2173 unsigned long fp = rw->ins[6];
2174
2175 if (!fp)
2176 return NULL;
2177
2178 return (struct reg_window *) (fp + STACK_BIAS);
2179 }
2180
2181 void die_if_kernel(char *str, struct pt_regs *regs)
2182 {
2183 static int die_counter;
2184 extern void __show_regs(struct pt_regs * regs);
2185 extern void smp_report_regs(void);
2186 int count = 0;
2187
2188 /* Amuse the user. */
2189 printk(
2190 " \\|/ ____ \\|/\n"
2191 " \"@'/ .. \\`@\"\n"
2192 " /_| \\__/ |_\\\n"
2193 " \\__U_/\n");
2194
2195 printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter);
2196 notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
2197 __asm__ __volatile__("flushw");
2198 __show_regs(regs);
2199 if (regs->tstate & TSTATE_PRIV) {
2200 struct reg_window *rw = (struct reg_window *)
2201 (regs->u_regs[UREG_FP] + STACK_BIAS);
2202
2203 /* Stop the back trace when we hit userland or we
2204 * find some badly aligned kernel stack.
2205 */
2206 while (rw &&
2207 count++ < 30 &&
2208 is_kernel_stack(current, rw)) {
2209 printk("Caller[%016lx]", rw->ins[7]);
2210 print_symbol(": %s", rw->ins[7]);
2211 printk("\n");
2212
2213 rw = kernel_stack_up(rw);
2214 }
2215 instruction_dump ((unsigned int *) regs->tpc);
2216 } else {
2217 if (test_thread_flag(TIF_32BIT)) {
2218 regs->tpc &= 0xffffffff;
2219 regs->tnpc &= 0xffffffff;
2220 }
2221 user_instruction_dump ((unsigned int __user *) regs->tpc);
2222 }
2223 #ifdef CONFIG_SMP
2224 smp_report_regs();
2225 #endif
2226
2227 if (regs->tstate & TSTATE_PRIV)
2228 do_exit(SIGKILL);
2229 do_exit(SIGSEGV);
2230 }
2231
2232 extern int handle_popc(u32 insn, struct pt_regs *regs);
2233 extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
2234
2235 void do_illegal_instruction(struct pt_regs *regs)
2236 {
2237 unsigned long pc = regs->tpc;
2238 unsigned long tstate = regs->tstate;
2239 u32 insn;
2240 siginfo_t info;
2241
2242 if (notify_die(DIE_TRAP, "illegal instruction", regs,
2243 0, 0x10, SIGILL) == NOTIFY_STOP)
2244 return;
2245
2246 if (tstate & TSTATE_PRIV)
2247 die_if_kernel("Kernel illegal instruction", regs);
2248 if (test_thread_flag(TIF_32BIT))
2249 pc = (u32)pc;
2250 if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
2251 if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
2252 if (handle_popc(insn, regs))
2253 return;
2254 } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
2255 if (handle_ldf_stq(insn, regs))
2256 return;
2257 }
2258 }
2259 info.si_signo = SIGILL;
2260 info.si_errno = 0;
2261 info.si_code = ILL_ILLOPC;
2262 info.si_addr = (void __user *)pc;
2263 info.si_trapno = 0;
2264 force_sig_info(SIGILL, &info, current);
2265 }
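
/* The mask/match pairs above follow the SPARC V9 instruction layout:
 * bits 31:30 are op, bits 24:19 the op3 opcode extension, bits 18:14
 * rs1.  So 0xc1ffc000/0x81700000 selects op=2, op3=0x2e (POPC) with
 * rs1=%g0, as the architecture requires.  A sketch of the field
 * extraction (hypothetical helpers, illustration only):
 */
static inline u32 sketch_insn_op(u32 insn)
{
	return insn >> 30;			/* bits 31:30 */
}

static inline u32 sketch_insn_op3(u32 insn)
{
	return (insn >> 19) & 0x3f;		/* bits 24:19 */
}

static inline int sketch_insn_is_popc(u32 insn)
{
	return sketch_insn_op(insn) == 2 &&
	       sketch_insn_op3(insn) == 0x2e &&
	       ((insn >> 14) & 0x1f) == 0;	/* rs1 must be %g0 */
}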
2266
2267 extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
2268
2269 void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
2270 {
2271 siginfo_t info;
2272
2273 if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2274 0, 0x34, SIGSEGV) == NOTIFY_STOP)
2275 return;
2276
2277 if (regs->tstate & TSTATE_PRIV) {
2278 kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2279 return;
2280 }
2281 info.si_signo = SIGBUS;
2282 info.si_errno = 0;
2283 info.si_code = BUS_ADRALN;
2284 info.si_addr = (void __user *)sfar;
2285 info.si_trapno = 0;
2286 force_sig_info(SIGBUS, &info, current);
2287 }
2288
2289 void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
2290 {
2291 siginfo_t info;
2292
2293 if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2294 0, 0x34, SIGSEGV) == NOTIFY_STOP)
2295 return;
2296
2297 if (regs->tstate & TSTATE_PRIV) {
2298 kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2299 return;
2300 }
2301 info.si_signo = SIGBUS;
2302 info.si_errno = 0;
2303 info.si_code = BUS_ADRALN;
2304 info.si_addr = (void __user *) addr;
2305 info.si_trapno = 0;
2306 force_sig_info(SIGBUS, &info, current);
2307 }
2308
2309 void do_privop(struct pt_regs *regs)
2310 {
2311 siginfo_t info;
2312
2313 if (notify_die(DIE_TRAP, "privileged operation", regs,
2314 0, 0x11, SIGILL) == NOTIFY_STOP)
2315 return;
2316
2317 if (test_thread_flag(TIF_32BIT)) {
2318 regs->tpc &= 0xffffffff;
2319 regs->tnpc &= 0xffffffff;
2320 }
2321 info.si_signo = SIGILL;
2322 info.si_errno = 0;
2323 info.si_code = ILL_PRVOPC;
2324 info.si_addr = (void __user *)regs->tpc;
2325 info.si_trapno = 0;
2326 force_sig_info(SIGILL, &info, current);
2327 }
2328
2329 void do_privact(struct pt_regs *regs)
2330 {
2331 do_privop(regs);
2332 }
2333
2334 /* Trap level 1 stuff or other traps we should never see... */
2335 void do_cee(struct pt_regs *regs)
2336 {
2337 die_if_kernel("TL0: Cache Error Exception", regs);
2338 }
2339
2340 void do_cee_tl1(struct pt_regs *regs)
2341 {
2342 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2343 die_if_kernel("TL1: Cache Error Exception", regs);
2344 }
2345
2346 void do_dae_tl1(struct pt_regs *regs)
2347 {
2348 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2349 die_if_kernel("TL1: Data Access Exception", regs);
2350 }
2351
2352 void do_iae_tl1(struct pt_regs *regs)
2353 {
2354 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2355 die_if_kernel("TL1: Instruction Access Exception", regs);
2356 }
2357
2358 void do_div0_tl1(struct pt_regs *regs)
2359 {
2360 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2361 die_if_kernel("TL1: DIV0 Exception", regs);
2362 }
2363
2364 void do_fpdis_tl1(struct pt_regs *regs)
2365 {
2366 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2367 die_if_kernel("TL1: FPU Disabled", regs);
2368 }
2369
2370 void do_fpieee_tl1(struct pt_regs *regs)
2371 {
2372 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2373 die_if_kernel("TL1: FPU IEEE Exception", regs);
2374 }
2375
2376 void do_fpother_tl1(struct pt_regs *regs)
2377 {
2378 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2379 die_if_kernel("TL1: FPU Other Exception", regs);
2380 }
2381
2382 void do_ill_tl1(struct pt_regs *regs)
2383 {
2384 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2385 die_if_kernel("TL1: Illegal Instruction Exception", regs);
2386 }
2387
2388 void do_irq_tl1(struct pt_regs *regs)
2389 {
2390 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2391 die_if_kernel("TL1: IRQ Exception", regs);
2392 }
2393
2394 void do_lddfmna_tl1(struct pt_regs *regs)
2395 {
2396 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2397 die_if_kernel("TL1: LDDF Exception", regs);
2398 }
2399
2400 void do_stdfmna_tl1(struct pt_regs *regs)
2401 {
2402 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2403 die_if_kernel("TL1: STDF Exception", regs);
2404 }
2405
2406 void do_paw(struct pt_regs *regs)
2407 {
2408 die_if_kernel("TL0: Phys Watchpoint Exception", regs);
2409 }
2410
2411 void do_paw_tl1(struct pt_regs *regs)
2412 {
2413 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2414 die_if_kernel("TL1: Phys Watchpoint Exception", regs);
2415 }
2416
2417 void do_vaw(struct pt_regs *regs)
2418 {
2419 die_if_kernel("TL0: Virt Watchpoint Exception", regs);
2420 }
2421
2422 void do_vaw_tl1(struct pt_regs *regs)
2423 {
2424 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2425 die_if_kernel("TL1: Virt Watchpoint Exception", regs);
2426 }
2427
2428 void do_tof_tl1(struct pt_regs *regs)
2429 {
2430 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2431 die_if_kernel("TL1: Tag Overflow Exception", regs);
2432 }
2433
2434 void do_getpsr(struct pt_regs *regs)
2435 {
2436 regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
2437 regs->tpc = regs->tnpc;
2438 regs->tnpc += 4;
2439 if (test_thread_flag(TIF_32BIT)) {
2440 regs->tpc &= 0xffffffff;
2441 regs->tnpc &= 0xffffffff;
2442 }
2443 }
2444
2445 struct trap_per_cpu trap_block[NR_CPUS];
2446
2447 /* This can get invoked before sched_init() so play it super safe
2448 * and use hard_smp_processor_id().
2449 */
2450 void init_cur_cpu_trap(struct thread_info *t)
2451 {
2452 int cpu = hard_smp_processor_id();
2453 struct trap_per_cpu *p = &trap_block[cpu];
2454
2455 p->thread = t;
2456 p->pgd_paddr = 0;
2457 }
2458
2459 extern void thread_info_offsets_are_bolixed_dave(void);
2460 extern void trap_per_cpu_offsets_are_bolixed_dave(void);
2461
2462 /* Only invoked on boot processor. */
2463 void __init trap_init(void)
2464 {
2465 /* Compile time sanity check. */
2466 if (TI_TASK != offsetof(struct thread_info, task) ||
2467 TI_FLAGS != offsetof(struct thread_info, flags) ||
2468 TI_CPU != offsetof(struct thread_info, cpu) ||
2469 TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
2470 TI_KSP != offsetof(struct thread_info, ksp) ||
2471 TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
2472 TI_KREGS != offsetof(struct thread_info, kregs) ||
2473 TI_UTRAPS != offsetof(struct thread_info, utraps) ||
2474 TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
2475 TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
2476 TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
2477 TI_GSR != offsetof(struct thread_info, gsr) ||
2478 TI_XFSR != offsetof(struct thread_info, xfsr) ||
2479 TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
2480 TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
2481 TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
2482 TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
2483 TI_PCR != offsetof(struct thread_info, pcr_reg) ||
2484 TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
2485 TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
2486 TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
2487 TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
2488 TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
2489 TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
2490 TI_FPREGS != offsetof(struct thread_info, fpregs) ||
2491 (TI_FPREGS & (64 - 1)))
2492 thread_info_offsets_are_bolixed_dave();
2493
2494 if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) ||
2495 (TRAP_PER_CPU_PGD_PADDR !=
2496 offsetof(struct trap_per_cpu, pgd_paddr)) ||
2497 (TRAP_PER_CPU_CPU_MONDO_PA !=
2498 offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
2499 (TRAP_PER_CPU_DEV_MONDO_PA !=
2500 offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
2501 (TRAP_PER_CPU_RESUM_MONDO_PA !=
2502 offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
2503 (TRAP_PER_CPU_RESUM_KBUF_PA !=
2504 offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
2505 (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
2506 offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
2507 (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
2508 offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
2509 (TRAP_PER_CPU_FAULT_INFO !=
2510 offsetof(struct trap_per_cpu, fault_info)) ||
2511 (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
2512 offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
2513 (TRAP_PER_CPU_CPU_LIST_PA !=
2514 offsetof(struct trap_per_cpu, cpu_list_pa)))
2515 trap_per_cpu_offsets_are_bolixed_dave();
2516
2517 /* Attach to the address space of init_task. On SMP we
2518 * do this in smp.c:smp_callin for other cpus.
2519 */
2520 atomic_inc(&init_mm.mm_count);
2521 current->active_mm = &init_mm;
2522 }
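
/* The runtime offset checks above could equally be expressed as
 * compile-time assertions.  A sketch of one such check, assuming the
 * BUILD_BUG_ON() macro from <linux/kernel.h> is available in this tree
 * (illustrative; the original instead calls the undefined
 * *_are_bolixed_dave() hooks, so a mismatch fails at link time):
 */
static inline void sketch_thread_info_layout_check(void)
{
	BUILD_BUG_ON(TI_TASK != offsetof(struct thread_info, task));
	BUILD_BUG_ON(TI_FLAGS != offsetof(struct thread_info, flags));
	BUILD_BUG_ON(TI_FPREGS & (64 - 1));	/* keep fpregs 64-byte aligned */
}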