[SPARC64]: Sun4v interrupt handling.
arch/sparc64/kernel/traps.c

/* $Id: traps.c,v 1.85 2002/02/09 19:49:31 davem Exp $
 * arch/sparc64/kernel/traps.c
 *
 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
 */

/*
 * I like traps on v9, :))))
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>	/* for jiffies */
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/mm.h>
#include <linux/init.h>

#include <asm/delay.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/oplib.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/uaccess.h>
#include <asm/fpumacro.h>
#include <asm/lsu.h>
#include <asm/dcu.h>
#include <asm/estate.h>
#include <asm/chafsr.h>
#include <asm/sfafsr.h>
#include <asm/psrcompat.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/kdebug.h>
#include <asm/head.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif

struct notifier_block *sparc64die_chain;
static DEFINE_SPINLOCK(die_notifier_lock);

int register_die_notifier(struct notifier_block *nb)
{
	int err = 0;
	unsigned long flags;

	spin_lock_irqsave(&die_notifier_lock, flags);
	err = notifier_chain_register(&sparc64die_chain, nb);
	spin_unlock_irqrestore(&die_notifier_lock, flags);
	return err;
}
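
/* Example usage (a minimal sketch; "my_die_handler" and its notifier block
 * are hypothetical, not part of this file): a debugger or error monitor
 * hooks the sparc64 die chain like so:
 *
 *	static int my_die_handler(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		return NOTIFY_DONE;
 *	}
 *	static struct notifier_block my_die_nb = {
 *		.notifier_call = my_die_handler,
 *	};
 *	register_die_notifier(&my_die_nb);
 *
 * A handler returns NOTIFY_STOP to claim an event and suppress the default
 * handling, which is exactly what the notify_die() call sites below check.
 */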

/* When an irrecoverable trap occurs at tl > 0, the trap entry
 * code logs the trap state registers at every level in the trap
 * stack.  It is found at (pt_regs + sizeof(pt_regs)) and the layout
 * is as follows:
 */
struct tl1_traplog {
	struct {
		unsigned long tstate;
		unsigned long tpc;
		unsigned long tnpc;
		unsigned long tt;
	} trapstack[4];
	unsigned long tl;
};
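
/* The handlers below locate this log immediately past the saved register
 * frame, e.g.:
 *
 *	struct tl1_traplog *p = (struct tl1_traplog *)(regs + 1);
 *
 * Pointer arithmetic on "regs + 1" lands exactly at
 * (pt_regs + sizeof(pt_regs)), matching the layout described above.
 */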

static void dump_tl1_traplog(struct tl1_traplog *p)
{
	int i;

	printk(KERN_CRIT "TRAPLOG: Error at trap level 0x%lx, "
	       "dumping trap stack.\n", p->tl);
	for (i = 0; i < 4; i++) {
		printk(KERN_CRIT
		       "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
		       "TNPC[%016lx] TT[%lx]\n",
		       i + 1,
		       p->trapstack[i].tstate, p->trapstack[i].tpc,
		       p->trapstack[i].tnpc, p->trapstack[i].tt);
	}
}

void do_call_debug(struct pt_regs *regs)
{
	notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT);
}

void bad_trap(struct pt_regs *regs, long lvl)
{
	char buffer[32];
	siginfo_t info;

	if (notify_die(DIE_TRAP, "bad trap", regs,
		       0, lvl, SIGTRAP) == NOTIFY_STOP)
		return;

	if (lvl < 0x100) {
		sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
		die_if_kernel(buffer, regs);
	}

	lvl -= 0x100;
	if (regs->tstate & TSTATE_PRIV) {
		sprintf(buffer, "Kernel bad sw trap %lx", lvl);
		die_if_kernel(buffer, regs);
	}
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLTRP;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = lvl;
	force_sig_info(SIGILL, &info, current);
}

void bad_trap_tl1(struct pt_regs *regs, long lvl)
{
	char buffer[32];

	if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
		       0, lvl, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	sprintf(buffer, "Bad trap %lx at tl>0", lvl);
	die_if_kernel(buffer, regs);
}

#ifdef CONFIG_DEBUG_BUGVERBOSE
void do_BUG(const char *file, int line)
{
	bust_spinlocks(1);
	printk("kernel BUG at %s:%d!\n", file, line);
}
#endif

void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "instruction access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		printk("spitfire_insn_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Iax", regs);
	}
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}

void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	spitfire_insn_access_exception(regs, sfsr, sfar);
}

void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* Test if this comes from uaccess places. */
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			/* Ouch, somebody is trying VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
			printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
			       regs->tpc, entry->fixup);
#endif
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
		/* Shit... */
		printk("spitfire_data_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Dax", regs);
	}

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}

void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	spitfire_data_access_exception(regs, sfsr, sfar);
}

#ifdef CONFIG_PCI
/* This is really pathetic... */
extern volatile int pci_poke_in_progress;
extern volatile int pci_poke_cpu;
extern volatile int pci_poke_faulted;
#endif

/* When access exceptions happen, we must do this. */
static void spitfire_clean_and_reenable_l1_caches(void)
{
	unsigned long va;

	if (tlb_type != spitfire)
		BUG();

	/* Clean 'em. */
	for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
		spitfire_put_icache_tag(va, 0x0);
		spitfire_put_dcache_tag(va, 0x0);
	}

	/* Re-enable I-cache and D-cache in the LSU control register. */
	__asm__ __volatile__("flush %%g6\n\t"
			     "membar #Sync\n\t"
			     "stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
				    LSU_CONTROL_IM | LSU_CONTROL_DM),
			       "i" (ASI_LSU_CONTROL)
			     : "memory");
}
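
/* Note on the loop bounds above: Spitfire's L1 caches are indexed by low
 * virtual address bits with 32 bytes per line, so sweeping tag writes
 * across (PAGE_SIZE << 1) bytes of index space covers every tag in both
 * caches (a 16K sweep given the 8K sparc64 page size -- background from
 * the chip docs as I read them, not something this file itself asserts).
 */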

static void spitfire_enable_estate_errors(void)
{
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (ESTATE_ERR_ALL),
			       "i" (ASI_ESTATE_ERROR_EN));
}

static char ecc_syndrome_table[] = {
	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};
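
/* The table is indexed by the low eight syndrome bits captured in a UDB
 * error register, exactly as the logging code below does:
 *
 *	scode = ecc_syndrome_table[udbl & 0xff];
 *
 * The resulting code is what prom_getunumber() expects when mapping a
 * syndrome back to a memory module name.
 */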

static char *syndrome_unknown = "<Unknown>";

static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
{
	unsigned short scode;
	char memmod_str[64], *p;

	if (udbl & bit) {
		scode = ecc_syndrome_table[udbl & 0xff];
		if (prom_getunumber(scode, afar,
				    memmod_str, sizeof(memmod_str)) == -1)
			p = syndrome_unknown;
		else
			p = memmod_str;
		printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
		       "Memory Module \"%s\"\n",
		       smp_processor_id(), scode, p);
	}

	if (udbh & bit) {
		scode = ecc_syndrome_table[udbh & 0xff];
		if (prom_getunumber(scode, afar,
				    memmod_str, sizeof(memmod_str)) == -1)
			p = syndrome_unknown;
		else
			p = memmod_str;
		printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
		       "Memory Module \"%s\"\n",
		       smp_processor_id(), scode, p);
	}
}

static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
{
	printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
	       "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
	       smp_processor_id(), afsr, afar, udbl, udbh, tl1);

	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);

	/* We always log it, even if someone is listening for this
	 * trap.
	 */
	notify_die(DIE_TRAP, "Correctable ECC Error", regs,
		   0, TRAP_TYPE_CEE, SIGTRAP);

	/* The Correctable ECC Error trap does not disable I/D caches.  So
	 * we only have to restore the ESTATE Error Enable register.
	 */
	spitfire_enable_estate_errors();
}

static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
{
	siginfo_t info;

	printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
	       "AFAR[%lx] UDBL[%lx] UDBH[%lx] TT[%lx] TL>1[%d]\n",
	       smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);

	/* XXX add more human friendly logging of the error status
	 * XXX as is implemented for cheetah
	 */

	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);

	/* We always log it, even if someone is listening for this
	 * trap.
	 */
	notify_die(DIE_TRAP, "Uncorrectable Error", regs,
		   0, tt, SIGTRAP);

	if (regs->tstate & TSTATE_PRIV) {
		if (tl1)
			dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
		die_if_kernel("UE", regs);
	}

	/* XXX need more intelligent processing here, such as is implemented
	 * XXX for cheetah errors, in fact if the E-cache still holds the
	 * XXX line with bad parity this will loop
	 */

	spitfire_clean_and_reenable_l1_caches();
	spitfire_enable_estate_errors();

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_OBJERR;
	info.si_addr = (void *)0;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}

void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
{
	unsigned long afsr, tt, udbh, udbl;
	int tl1;

	afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
	tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
	tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
	udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
	udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;

#ifdef CONFIG_PCI
	if (tt == TRAP_TYPE_DAE &&
	    pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		spitfire_clean_and_reenable_l1_caches();
		spitfire_enable_estate_errors();

		pci_poke_faulted = 1;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	if (afsr & SFAFSR_UE)
		spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);

	if (tt == TRAP_TYPE_CEE) {
		/* Handle the case where we took a CEE trap, but ACK'd
		 * only the UE state in the UDB error registers.
		 */
		if (afsr & SFAFSR_UE) {
			if (udbh & UDBE_CE) {
				__asm__ __volatile__(
					"stxa %0, [%1] %2\n\t"
					"membar #Sync"
					: /* no outputs */
					: "r" (udbh & UDBE_CE),
					  "r" (0x0), "i" (ASI_UDB_ERROR_W));
			}
			if (udbl & UDBE_CE) {
				__asm__ __volatile__(
					"stxa %0, [%1] %2\n\t"
					"membar #Sync"
					: /* no outputs */
					: "r" (udbl & UDBE_CE),
					  "r" (0x18), "i" (ASI_UDB_ERROR_W));
			}
		}

		spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
	}
}
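
/* For reference: "status_encoded" is a single register into which the
 * low-level trap entry code packs the AFSR, the UDB high/low error bits,
 * the trap type, and a TL>1 flag, using the SFSTAT_* masks and shifts
 * from asm/sfafsr.h (presumably so one register can carry the whole
 * error state out of assembly).  Decoding is just the reverse of that
 * packing, e.g.:
 *
 *	afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
 *
 * The exact bit positions live in sfafsr.h, not here.
 */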

int cheetah_pcache_forced_on;

void cheetah_enable_pcache(void)
{
	unsigned long dcr;

	printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
	       smp_processor_id());

	__asm__ __volatile__("ldxa [%%g0] %1, %0"
			     : "=r" (dcr)
			     : "i" (ASI_DCU_CONTROL_REG));
	dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
}
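
/* Bit meanings here, to the best of my reading of asm/dcu.h and the
 * UltraSPARC-III DCU control register documentation (background, not
 * normative): DCU_PE enables the prefetch cache itself, DCU_HPE and
 * DCU_SPE enable hardware- and software-initiated prefetches into it,
 * and DCU_SL enables second-load steering.  All four are set together so
 * that a forced-on P-cache is actually useful.
 */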

/* Cheetah error trap handling. */
static unsigned long ecache_flush_physbase;
static unsigned long ecache_flush_linesize;
static unsigned long ecache_flush_size;

/* WARNING: The error trap handlers in assembly know the precise
 * layout of the following structure.
 *
 * C-level handlers below use this information to log the error
 * and then determine how to recover (if possible).
 */
struct cheetah_err_info {
/*0x00*/u64 afsr;
/*0x08*/u64 afar;

	/* D-cache state */
/*0x10*/u64 dcache_data[4];	/* The actual data */
/*0x30*/u64 dcache_index;	/* D-cache index */
/*0x38*/u64 dcache_tag;		/* D-cache tag/valid */
/*0x40*/u64 dcache_utag;	/* D-cache microtag */
/*0x48*/u64 dcache_stag;	/* D-cache snooptag */

	/* I-cache state */
/*0x50*/u64 icache_data[8];	/* The actual insns + predecode */
/*0x90*/u64 icache_index;	/* I-cache index */
/*0x98*/u64 icache_tag;		/* I-cache phys tag */
/*0xa0*/u64 icache_utag;	/* I-cache microtag */
/*0xa8*/u64 icache_stag;	/* I-cache snooptag */
/*0xb0*/u64 icache_upper;	/* I-cache upper-tag */
/*0xb8*/u64 icache_lower;	/* I-cache lower-tag */

	/* E-cache state */
/*0xc0*/u64 ecache_data[4];	/* 32 bytes from staging registers */
/*0xe0*/u64 ecache_index;	/* E-cache index */
/*0xe8*/u64 ecache_tag;		/* E-cache tag/state */

/*0xf0*/u64 __pad[32 - 30];
};
#define CHAFSR_INVALID ((u64)-1L)

/* This table is ordered in priority of errors and matches the
 * AFAR overwrite policy as well.
 */
struct afsr_error_table {
	unsigned long mask;
	const char *name;
};

static const char CHAFSR_PERR_msg[] =
	"System interface protocol error";
static const char CHAFSR_IERR_msg[] =
	"Internal processor error";
static const char CHAFSR_ISAP_msg[] =
	"System request parity error on incoming address";
static const char CHAFSR_UCU_msg[] =
	"Uncorrectable E-cache ECC error for ifetch/data";
static const char CHAFSR_UCC_msg[] =
	"SW Correctable E-cache ECC error for ifetch/data";
static const char CHAFSR_UE_msg[] =
	"Uncorrectable system bus data ECC error for read";
static const char CHAFSR_EDU_msg[] =
	"Uncorrectable E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMU_msg[] =
	"Uncorrectable system bus MTAG error";
static const char CHAFSR_WDU_msg[] =
	"Uncorrectable E-cache ECC error for writeback";
static const char CHAFSR_CPU_msg[] =
	"Uncorrectable ECC error for copyout";
static const char CHAFSR_CE_msg[] =
	"HW corrected system bus data ECC error for read";
static const char CHAFSR_EDC_msg[] =
	"HW corrected E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMC_msg[] =
	"HW corrected system bus MTAG ECC error";
static const char CHAFSR_WDC_msg[] =
	"HW corrected E-cache ECC error for writeback";
static const char CHAFSR_CPC_msg[] =
	"HW corrected ECC error for copyout";
static const char CHAFSR_TO_msg[] =
	"Unmapped error from system bus";
static const char CHAFSR_BERR_msg[] =
	"Bus error response from system bus";
static const char CHAFSR_IVC_msg[] =
	"HW corrected system bus data ECC error for ivec read";
static const char CHAFSR_IVU_msg[] =
	"Uncorrectable system bus data ECC error for ivec read";
static struct afsr_error_table __cheetah_error_table[] = {
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
static const char CHPAFSR_DTO_msg[] =
	"System bus unmapped error for prefetch/storequeue-read";
static const char CHPAFSR_DBERR_msg[] =
	"System bus error for prefetch/storequeue-read";
static const char CHPAFSR_THCE_msg[] =
	"Hardware corrected E-cache Tag ECC error";
static const char CHPAFSR_TSCE_msg[] =
	"SW handled correctable E-cache Tag ECC error";
static const char CHPAFSR_TUE_msg[] =
	"Uncorrectable E-cache Tag ECC error";
static const char CHPAFSR_DUE_msg[] =
	"System bus uncorrectable data ECC error due to prefetch/store-fill";
static struct afsr_error_table __cheetah_plus_error_table[] = {
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	{	CHPAFSR_DTO,	CHPAFSR_DTO_msg		},
	{	CHPAFSR_DBERR,	CHPAFSR_DBERR_msg	},
	{	CHPAFSR_THCE,	CHPAFSR_THCE_msg	},
	{	CHPAFSR_TSCE,	CHPAFSR_TSCE_msg	},
	{	CHPAFSR_TUE,	CHPAFSR_TUE_msg		},
	{	CHPAFSR_DUE,	CHPAFSR_DUE_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
static const char JPAFSR_JETO_msg[] =
	"System interface protocol error, hw timeout caused";
static const char JPAFSR_SCE_msg[] =
	"Parity error on system snoop results";
static const char JPAFSR_JEIC_msg[] =
	"System interface protocol error, illegal command detected";
static const char JPAFSR_JEIT_msg[] =
	"System interface protocol error, illegal ADTYPE detected";
static const char JPAFSR_OM_msg[] =
	"Out of range memory error has occurred";
static const char JPAFSR_ETP_msg[] =
	"Parity error on L2 cache tag SRAM";
static const char JPAFSR_UMS_msg[] =
	"Error due to unsupported store";
static const char JPAFSR_RUE_msg[] =
	"Uncorrectable ECC error from remote cache/memory";
static const char JPAFSR_RCE_msg[] =
	"Correctable ECC error from remote cache/memory";
static const char JPAFSR_BP_msg[] =
	"JBUS parity error on returned read data";
static const char JPAFSR_WBP_msg[] =
	"JBUS parity error on data for writeback or block store";
static const char JPAFSR_FRC_msg[] =
	"Foreign read to DRAM incurring correctable ECC error";
static const char JPAFSR_FRU_msg[] =
	"Foreign read to DRAM incurring uncorrectable ECC error";
static struct afsr_error_table __jalapeno_error_table[] = {
	{	JPAFSR_JETO,	JPAFSR_JETO_msg		},
	{	JPAFSR_SCE,	JPAFSR_SCE_msg		},
	{	JPAFSR_JEIC,	JPAFSR_JEIC_msg		},
	{	JPAFSR_JEIT,	JPAFSR_JEIT_msg		},
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	JPAFSR_OM,	JPAFSR_OM_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	JPAFSR_ETP,	JPAFSR_ETP_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	{	JPAFSR_UMS,	JPAFSR_UMS_msg		},
	{	JPAFSR_RUE,	JPAFSR_RUE_msg		},
	{	JPAFSR_RCE,	JPAFSR_RCE_msg		},
	{	JPAFSR_BP,	JPAFSR_BP_msg		},
	{	JPAFSR_WBP,	JPAFSR_WBP_msg		},
	{	JPAFSR_FRC,	JPAFSR_FRC_msg		},
	{	JPAFSR_FRU,	JPAFSR_FRU_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
static struct afsr_error_table *cheetah_error_table;
static unsigned long cheetah_afsr_errors;

/* This is allocated at boot time based upon the largest hardware
 * cpu ID in the system.  We allocate two entries per cpu, one for
 * TL==0 logging and one for TL >= 1 logging.
 */
struct cheetah_err_info *cheetah_error_log;

static __inline__ struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
{
	struct cheetah_err_info *p;
	int cpu = smp_processor_id();

	if (!cheetah_error_log)
		return NULL;

	p = cheetah_error_log + (cpu * 2);
	if ((afsr & CHAFSR_TL1) != 0UL)
		p++;

	return p;
}
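
/* The indexing scheme: entry (cpu * 2) records the TL==0 trap and entry
 * (cpu * 2 + 1) the TL>1 trap for a given cpu, keyed off the CHAFSR_TL1
 * bit the trap handler folds into the AFSR value it hands us.  Keeping
 * the two trap levels in separate slots means a TL>1 error cannot
 * clobber the snapshot of a TL==0 error that is still being logged.
 */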

extern unsigned int tl0_icpe[], tl1_icpe[];
extern unsigned int tl0_dcpe[], tl1_dcpe[];
extern unsigned int tl0_fecc[], tl1_fecc[];
extern unsigned int tl0_cee[], tl1_cee[];
extern unsigned int tl0_iae[], tl1_iae[];
extern unsigned int tl0_dae[], tl1_dae[];
extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];

void __init cheetah_ecache_flush_init(void)
{
	unsigned long largest_size, smallest_linesize, order, ver;
	int node, i, instance;

	/* Scan all cpu device tree nodes, note two values:
	 * 1) largest E-cache size
	 * 2) smallest E-cache line size
	 */
	largest_size = 0UL;
	smallest_linesize = ~0UL;

	instance = 0;
	while (!cpu_find_by_instance(instance, &node, NULL)) {
		unsigned long val;

		val = prom_getintdefault(node, "ecache-size",
					 (2 * 1024 * 1024));
		if (val > largest_size)
			largest_size = val;
		val = prom_getintdefault(node, "ecache-line-size", 64);
		if (val < smallest_linesize)
			smallest_linesize = val;
		instance++;
	}

	if (largest_size == 0UL || smallest_linesize == ~0UL) {
		prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
			    "parameters.\n");
		prom_halt();
	}

	ecache_flush_size = (2 * largest_size);
	ecache_flush_linesize = smallest_linesize;

	ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);

	if (ecache_flush_physbase == ~0UL) {
		prom_printf("cheetah_ecache_flush_init: Cannot find %ld byte "
			    "contiguous physical memory.\n",
			    ecache_flush_size);
		prom_halt();
	}

	/* Now allocate error trap reporting scoreboard. */
	node = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
	for (order = 0; order < MAX_ORDER; order++) {
		if ((PAGE_SIZE << order) >= node)
			break;
	}
	cheetah_error_log = (struct cheetah_err_info *)
		__get_free_pages(GFP_KERNEL, order);
	if (!cheetah_error_log) {
		prom_printf("cheetah_ecache_flush_init: Failed to allocate "
			    "error logging scoreboard (%d bytes).\n", node);
		prom_halt();
	}
	memset(cheetah_error_log, 0, PAGE_SIZE << order);

	/* Mark all AFSRs as invalid so that the trap handler will
	 * log new information there.
	 */
	for (i = 0; i < 2 * NR_CPUS; i++)
		cheetah_error_log[i].afsr = CHAFSR_INVALID;

	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	if ((ver >> 32) == __JALAPENO_ID ||
	    (ver >> 32) == __SERRANO_ID) {
		cheetah_error_table = &__jalapeno_error_table[0];
		cheetah_afsr_errors = JPAFSR_ERRORS;
	} else if ((ver >> 32) == 0x003e0015) {
		cheetah_error_table = &__cheetah_plus_error_table[0];
		cheetah_afsr_errors = CHPAFSR_ERRORS;
	} else {
		cheetah_error_table = &__cheetah_error_table[0];
		cheetah_afsr_errors = CHAFSR_ERRORS;
	}

	/* Now patch trap tables. */
	memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
	memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
	memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
	memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
	memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
	memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
	memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
	memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
	if (tlb_type == cheetah_plus) {
		memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
		memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
		memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
		memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
	}
	flushi(PAGE_OFFSET);
}

static void cheetah_flush_ecache(void)
{
	unsigned long flush_base = ecache_flush_physbase;
	unsigned long flush_linesize = ecache_flush_linesize;
	unsigned long flush_size = ecache_flush_size;

	__asm__ __volatile__("1: subcc	%0, %4, %0\n\t"
			     "   bne,pt	%%xcc, 1b\n\t"
			     "    ldxa	[%2 + %0] %3, %%g0\n\t"
			     : "=&r" (flush_size)
			     : "0" (flush_size), "r" (flush_base),
			       "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
}

static void cheetah_flush_ecache_line(unsigned long physaddr)
{
	unsigned long alias;

	physaddr &= ~(8UL - 1UL);
	physaddr = (ecache_flush_physbase +
		    (physaddr & ((ecache_flush_size >> 1UL) - 1UL)));
	alias = physaddr + (ecache_flush_size >> 1UL);
	__asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
			     "ldxa [%1] %2, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (physaddr), "r" (alias),
			       "i" (ASI_PHYS_USE_EC));
}
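
/* Both routines above are displacement flushes: the E-cache is physically
 * indexed, so sweeping loads through ASI_PHYS_USE_EC from the reserved
 * flush region push every dirty victim line back to memory.  The
 * single-line variant picks the flush-region address with the same cache
 * index as "physaddr", plus a second alias half a region (one cache size)
 * away, so the two loads displace the target line regardless of which way
 * of the set it occupies -- a reading of the code, not a statement from
 * the processor manuals.
 */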

/* Unfortunately, the diagnostic access to the I-cache tags we need to
 * use to clear the thing interferes with I-cache coherency transactions.
 *
 * So we must only flush the I-cache when it is disabled.
 */
static void __cheetah_flush_icache(void)
{
	unsigned int icache_size, icache_line_size;
	unsigned long addr;

	icache_size = local_cpu_data().icache_size;
	icache_line_size = local_cpu_data().icache_line_size;

	/* Clear the valid bits in all the tags. */
	for (addr = 0; addr < icache_size; addr += icache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr | (2 << 3)),
				       "i" (ASI_IC_TAG));
	}
}

static void cheetah_flush_icache(void)
{
	unsigned long dcu_save;

	/* Save current DCU, disable I-cache. */
	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     "or %0, %2, %%g1\n\t"
			     "stxa %%g1, [%%g0] %1\n\t"
			     "membar #Sync"
			     : "=r" (dcu_save)
			     : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
			     : "g1");

	__cheetah_flush_icache();

	/* Restore DCU register */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
}

static void cheetah_flush_dcache(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr), "i" (ASI_DCACHE_TAG));
	}
}

/* In order to make the even parity correct we must do two things.
 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
 * Next, we clear out all 32-bytes of data for that line.  Data of
 * all-zero + tag parity value of zero == correct parity.
 */
static void cheetah_plus_zap_dcache_parity(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		unsigned long tag = (addr >> 14);
		unsigned long line;

		__asm__ __volatile__("membar #Sync\n\t"
				     "stxa %0, [%1] %2\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (tag), "r" (addr),
				       "i" (ASI_DCACHE_UTAG));
		for (line = addr; line < addr + dcache_line_size; line += 8)
			__asm__ __volatile__("membar #Sync\n\t"
					     "stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (line),
					       "i" (ASI_DCACHE_DATA));
	}
}

/* Conversion tables used to frob Cheetah AFSR syndrome values into
 * something palatable to the memory controller driver get_unumber
 * routine.
 */
#define MT0 137
#define MT1 138
#define MT2 139
#define NONE 254
#define MTC0 140
#define MTC1 141
#define MTC2 142
#define MTC3 143
#define C0 128
#define C1 129
#define C2 130
#define C3 131
#define C4 132
#define C5 133
#define C6 134
#define C7 135
#define C8 136
#define M2 144
#define M3 145
#define M4 146
#define M 147
static unsigned char cheetah_ecc_syntab[] = {
/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
};
static unsigned char cheetah_mtag_syntab[] = {
	NONE, MTC0,
	MTC1, NONE,
	MTC2, NONE,
	NONE, MT0,
	MTC3, NONE,
	NONE, MT1,
	NONE, MT2,
	NONE, NONE
};

/* Return the highest priority error condition mentioned. */
static __inline__ unsigned long cheetah_get_hipri(unsigned long afsr)
{
	unsigned long tmp = 0;
	int i;

	for (i = 0; cheetah_error_table[i].mask; i++) {
		if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
			return tmp;
	}
	return tmp;
}

static const char *cheetah_get_string(unsigned long bit)
{
	int i;

	for (i = 0; cheetah_error_table[i].mask; i++) {
		if ((bit & cheetah_error_table[i].mask) != 0UL)
			return cheetah_error_table[i].name;
	}
	return "???";
}

extern int chmc_getunumber(int, unsigned long, char *, int);

static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
			       unsigned long afsr, unsigned long afar, int recoverable)
{
	unsigned long hipri;
	char unum[256];

	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       afsr, afar,
	       (afsr & CHAFSR_TL1) ? 1 : 0);
	printk("%s" "ERROR(%d): TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       regs->tpc, regs->tnpc, regs->tstate);
	printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
	hipri = cheetah_get_hipri(afsr);
	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       hipri, cheetah_get_string(hipri));

	/* Try to get unumber if relevant. */
#define ESYND_ERRORS	(CHAFSR_IVC | CHAFSR_IVU | \
			 CHAFSR_CPC | CHAFSR_CPU | \
			 CHAFSR_UE  | CHAFSR_CE  | \
			 CHAFSR_EDC | CHAFSR_EDU | \
			 CHAFSR_UCC | CHAFSR_UCU | \
			 CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS	(CHAFSR_EMC | CHAFSR_EMU)
	if (afsr & ESYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
		syndrome = cheetah_ecc_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	} else if (afsr & MSYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
		syndrome = cheetah_mtag_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	}

	/* Now dump the cache snapshots. */
	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->dcache_index,
	       info->dcache_tag,
	       info->dcache_utag,
	       info->dcache_stag);
	printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->dcache_data[0],
	       info->dcache_data[1],
	       info->dcache_data[2],
	       info->dcache_data[3]);
	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
	       "u[%016lx] l[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->icache_index,
	       info->icache_tag,
	       info->icache_utag,
	       info->icache_stag,
	       info->icache_upper,
	       info->icache_lower);
	printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[0],
	       info->icache_data[1],
	       info->icache_data[2],
	       info->icache_data[3]);
	printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[4],
	       info->icache_data[5],
	       info->icache_data[6],
	       info->icache_data[7]);
	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->ecache_index, info->ecache_tag);
	printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->ecache_data[0],
	       info->ecache_data[1],
	       info->ecache_data[2],
	       info->ecache_data[3]);

	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
	while (afsr != 0UL) {
		unsigned long bit = cheetah_get_hipri(afsr);

		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
		       (recoverable ? KERN_WARNING : KERN_CRIT),
		       bit, cheetah_get_string(bit));

		afsr &= ~bit;
	}

	if (!recoverable)
		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}

static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
	unsigned long afsr, afar;
	int ret = 0;

	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     : "=r" (afsr)
			     : "i" (ASI_AFSR));
	if ((afsr & cheetah_afsr_errors) != 0) {
		if (logp != NULL) {
			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
					     : "=r" (afar)
					     : "i" (ASI_AFAR));
			logp->afsr = afsr;
			logp->afar = afar;
		}
		ret = 1;
	}
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync\n\t"
			     : : "r" (afsr), "i" (ASI_AFSR));

	return ret;
}
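
/* A subtlety in cheetah_recheck_errors(): the final stxa writes the AFSR
 * value we just read back into the AFSR.  On these chips the error bits
 * are write-one-to-clear, so this acknowledges exactly the conditions we
 * sampled without losing errors that land afterward (per my understanding
 * of the AFSR semantics; the chip manuals are definitive).
 */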

void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable;

	/* Flush E-cache */
	cheetah_flush_ecache();

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	cheetah_flush_icache();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Fast-ECC error trap.\n");

	/* Flush E-cache to kick the error trap handlers out. */
	cheetah_flush_ecache();
}

/* Try to fix a correctable error by pushing the line out from
 * the E-cache.  Recheck error reporting registers to see if the
 * problem is intermittent.
 */
static int cheetah_fix_ce(unsigned long physaddr)
{
	unsigned long orig_estate;
	unsigned long alias1, alias2;
	int ret;

	/* Make sure correctable error traps are disabled. */
	__asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
			     "andn %0, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %2\n\t"
			     "membar #Sync"
			     : "=&r" (orig_estate)
			     : "i" (ESTATE_ERROR_CEEN),
			       "i" (ASI_ESTATE_ERROR_EN)
			     : "g1");

	/* We calculate alias addresses that will force the
	 * cache line in question out of the E-cache.  Then
	 * we bring it back in with an atomic instruction so
	 * that we get it in some modified/exclusive state,
	 * then we displace it again to try and get proper ECC
	 * pushed back into the system.
	 */
	physaddr &= ~(8UL - 1UL);
	alias1 = (ecache_flush_physbase +
		  (physaddr & ((ecache_flush_size >> 1) - 1)));
	alias2 = alias1 + (ecache_flush_size >> 1);
	__asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "casxa [%2] %3, %%g0, %%g0\n\t"
			     "membar #StoreLoad | #StoreStore\n\t"
			     "ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (alias1), "r" (alias2),
			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));

	/* Did that trigger another error? */
	if (cheetah_recheck_errors(NULL)) {
		/* Try one more time. */
		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
				     "membar #Sync"
				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
		if (cheetah_recheck_errors(NULL))
			ret = 2;
		else
			ret = 1;
	} else {
		/* No new error, intermittent problem. */
		ret = 0;
	}

	/* Restore error enables. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));

	return ret;
}
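
/* Return value convention for cheetah_fix_ce(): 0 means no error
 * reappeared after the displacement dance (an intermittent hit), 1 means
 * the recheck tripped once but the final probe load came back clean, and
 * 2 means the error persisted through the retry as well.  The one caller
 * below currently ignores the value (see its XXX comment), so this is
 * informational for now.
 */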

/* Return non-zero if PADDR is a valid physical memory address. */
static int cheetah_check_main_memory(unsigned long paddr)
{
	unsigned long vaddr = PAGE_OFFSET + paddr;

	if (vaddr > (unsigned long) high_memory)
		return 0;

	return kern_addr_valid(vaddr);
}

void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
		/* XXX Might want to log the results of this operation
		 * XXX somewhere... -DaveM
		 */
		cheetah_fix_ce(afar);
	}

	{
		int flush_all, flush_line;

		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_CPC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
				flush_line = 1;
			else
				flush_all = 1;
		}

		/* Trap handler only disabled I-cache, flush it. */
		cheetah_flush_icache();

		/* Re-enable I-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR */
	(void) cheetah_recheck_errors(&local_snapshot);

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Correctable-ECC error trap.\n");
}

void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I-cache/D-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_DC | DCU_IC)
				     : "g1");

		/* Re-enable error reporting */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_ESTATE_ERROR_EN),
				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
				     : "g1");

		(void) cheetah_recheck_errors(NULL);

		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	{
		int flush_all, flush_line;

		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDU) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_BERR) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
				flush_line = 1;
			else
				flush_all = 1;
		}

		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I/D caches */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC | DCU_DC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	/* "Recoverable" here means we try to yank the page from ever
	 * being newly used again.  This depends upon a few things:
	 * 1) Must be main memory, and AFAR must be valid.
	 * 2) If we trapped from user, OK.
	 * 3) Else, if we trapped from kernel we must find exception
	 *    table entry (ie. we have to have been accessing user
	 *    space).
	 *
	 * If AFAR is not in main memory, or we trapped from kernel
	 * and cannot find an exception table entry, it is unacceptable
	 * to try and continue.
	 */
	if (recoverable && is_memory) {
		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
			/* OK, usermode access. */
			recoverable = 1;
		} else {
			const struct exception_table_entry *entry;

			entry = search_exception_tables(regs->tpc);
			if (entry) {
				/* OK, kernel access to userspace. */
				recoverable = 1;
			} else {
				/* BAD, privileged state is corrupted. */
				recoverable = 0;
			}

			if (recoverable) {
				if (pfn_valid(afar >> PAGE_SHIFT))
					get_page(pfn_to_page(afar >> PAGE_SHIFT));
				else
					recoverable = 0;

				/* Only perform fixup if we still have a
				 * recoverable condition.
				 */
				if (recoverable) {
					regs->tpc = entry->fixup;
					regs->tnpc = regs->tpc + 4;
				}
			}
		}
	} else {
		recoverable = 0;
	}

	if (!recoverable)
		panic("Irrecoverable deferred error trap.\n");
}

/* Handle a D/I cache parity error trap.  TYPE is encoded as:
 *
 * Bit0:	0=dcache,1=icache
 * Bit1:	0=recoverable,1=unrecoverable
 *
 * The hardware has disabled both the I-cache and D-cache in
 * the %dcr register.
 */
void cheetah_plus_parity_error(int type, struct pt_regs *regs)
{
	if (type & 0x1)
		__cheetah_flush_icache();
	else
		cheetah_plus_zap_dcache_parity();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	if (type & 0x2) {
		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
		       smp_processor_id(),
		       (type & 0x1) ? 'I' : 'D',
		       regs->tpc);
		panic("Irrecoverable Cheetah+ parity error.");
	}

	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
	       smp_processor_id(),
	       (type & 0x1) ? 'I' : 'D',
	       regs->tpc);
}

struct sun4v_error_entry {
	u64 err_handle;
	u64 err_stick;

	u32 err_type;
#define SUN4V_ERR_TYPE_UNDEFINED	0
#define SUN4V_ERR_TYPE_UNCORRECTED_RES	1
#define SUN4V_ERR_TYPE_PRECISE_NONRES	2
#define SUN4V_ERR_TYPE_DEFERRED_NONRES	3
#define SUN4V_ERR_TYPE_WARNING_RES	4

	u32 err_attrs;
#define SUN4V_ERR_ATTRS_PROCESSOR	0x00000001
#define SUN4V_ERR_ATTRS_MEMORY		0x00000002
#define SUN4V_ERR_ATTRS_PIO		0x00000004
#define SUN4V_ERR_ATTRS_INT_REGISTERS	0x00000008
#define SUN4V_ERR_ATTRS_FPU_REGISTERS	0x00000010
#define SUN4V_ERR_ATTRS_USER_MODE	0x01000000
#define SUN4V_ERR_ATTRS_PRIV_MODE	0x02000000
#define SUN4V_ERR_ATTRS_RES_QUEUE_FULL	0x80000000

	u64 err_raddr;
	u32 err_size;
	u16 err_cpu;
	u16 err_pad;
};
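
/* This layout mirrors the error reports the sun4v hypervisor deposits in
 * the per-cpu resumable and nonresumable queues (the handlers below read
 * entries out of the trap_per_cpu resum/nonresum buffers).  As I read the
 * code, err_handle is the token identifying a report: clearing it is how
 * a handler releases the queue slot after copying the entry out.
 */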

static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);

static const char *sun4v_err_type_to_str(u32 type)
{
	switch (type) {
	case SUN4V_ERR_TYPE_UNDEFINED:
		return "undefined";
	case SUN4V_ERR_TYPE_UNCORRECTED_RES:
		return "uncorrected resumable";
	case SUN4V_ERR_TYPE_PRECISE_NONRES:
		return "precise nonresumable";
	case SUN4V_ERR_TYPE_DEFERRED_NONRES:
		return "deferred nonresumable";
	case SUN4V_ERR_TYPE_WARNING_RES:
		return "warning resumable";
	default:
		return "unknown";
	}
}

static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
{
	int cnt;

	printk("%s: Reporting on cpu %d\n", pfx, cpu);
	printk("%s: err_handle[%lx] err_stick[%lx] err_type[%08x:%s]\n",
	       pfx,
	       ent->err_handle, ent->err_stick,
	       ent->err_type,
	       sun4v_err_type_to_str(ent->err_type));
	printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n",
	       pfx,
	       ent->err_attrs,
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ?
		"processor" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ?
		"memory" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ?
		"pio" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ?
		"integer-regs" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ?
		"fpu-regs" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ?
		"user" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ?
		"privileged" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ?
		"queue-full" : ""));
	printk("%s: err_raddr[%016lx] err_size[%u] err_cpu[%u]\n",
	       pfx,
	       ent->err_raddr, ent->err_size, ent->err_cpu);

	if ((cnt = atomic_read(ocnt)) != 0) {
		atomic_set(ocnt, 0);
		wmb();
		printk("%s: Queue overflowed %d times.\n",
		       pfx, cnt);
	}
}

/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
 * Log the event and clear the first word of the entry.
 */
void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
{
	struct sun4v_error_entry *ent, local_copy;
	struct trap_per_cpu *tb;
	unsigned long paddr;
	int cpu;

	cpu = get_cpu();

	tb = &trap_block[cpu];
	paddr = tb->resum_kernel_buf_pa + offset;
	ent = __va(paddr);

	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));

	/* We have a local copy now, so release the entry. */
	ent->err_handle = 0;
	wmb();

	put_cpu();

	sun4v_log_error(&local_copy, cpu,
			KERN_ERR "RESUMABLE ERROR",
			&sun4v_resum_oflow_cnt);
}
1788
1789/* If we try to printk() we'll probably make matters worse, by trying
1790 * to retake locks this cpu already holds or causing more errors. So
1791 * just bump a counter, and we'll report these counter bumps above.
1792 */
1793void sun4v_resum_overflow(struct pt_regs *regs)
1794{
1795 atomic_inc(&sun4v_resum_oflow_cnt);
1796}
1797
1798/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
1799 * Log the event, clear the first word of the entry, and die.
1800 */
1801void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
1802{
1803 struct sun4v_error_entry *ent, local_copy;
1804 struct trap_per_cpu *tb;
1805 unsigned long paddr;
1806 int cpu;
1807
1808 cpu = get_cpu();
1809
1810 tb = &trap_block[cpu];
1811 paddr = tb->nonresum_kernel_buf_pa + offset;
1812 ent = __va(paddr);
1813
1814 memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
1815
1816 /* We have a local copy now, so release the entry. */
1817 ent->err_handle = 0;
1818 wmb();
1819
1820 put_cpu();
1821
1822#ifdef CONFIG_PCI
1823 /* Check for the special PCI poke sequence. */
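	/* A poke is a deliberate access that is expected to possibly
	 * fault (used when probing devices); record the fault and step
	 * past the faulting instruction instead of treating the error
	 * as fatal.
	 */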
1824 if (pci_poke_in_progress && pci_poke_cpu == cpu) {
1825 pci_poke_faulted = 1;
1826 regs->tpc += 4;
1827 regs->tnpc = regs->tpc + 4;
1828 return;
1829 }
1830#endif
1831
1832 sun4v_log_error(&local_copy, cpu,
1833 KERN_EMERG "NON-RESUMABLE ERROR",
1834 &sun4v_nonresum_oflow_cnt);
1835
1836 panic("Non-resumable error.");
1837}
1838
1839/* If we try to printk() we'll probably make matters worse, by trying
1840 * to retake locks this cpu already holds or causing more errors. So
1841 * just bump a counter, and we'll report these counter bumps above.
1842 */
1843void sun4v_nonresum_overflow(struct pt_regs *regs)
1844{
1845 /* XXX Actually, even this may not make much sense.  Perhaps
1846 * XXX we should just pull the plug and panic directly from here?
1847 */
1848 atomic_inc(&sun4v_nonresum_oflow_cnt);
1849}
1850
1851void do_fpe_common(struct pt_regs *regs)
1852{
1853 if (regs->tstate & TSTATE_PRIV) {
1854 regs->tpc = regs->tnpc;
1855 regs->tnpc += 4;
1856 } else {
1857 unsigned long fsr = current_thread_info()->xfsr[0];
1858 siginfo_t info;
1859
1860 if (test_thread_flag(TIF_32BIT)) {
1861 regs->tpc &= 0xffffffff;
1862 regs->tnpc &= 0xffffffff;
1863 }
1864 info.si_signo = SIGFPE;
1865 info.si_errno = 0;
1866 info.si_addr = (void __user *)regs->tpc;
1867 info.si_trapno = 0;
1868 info.si_code = __SI_FAULT;
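		/* FSR.ftt (bits 16:14) == 1 means IEEE_754_exception;
		 * the cexc field (bits 4:0) then identifies it:
		 * nvc (0x10), ofc (0x08), ufc (0x04), dzc (0x02),
		 * nxc (0x01).
		 */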
1869 if ((fsr & 0x1c000) == (1 << 14)) {
1870 if (fsr & 0x10)
1871 info.si_code = FPE_FLTINV;
1872 else if (fsr & 0x08)
1873 info.si_code = FPE_FLTOVF;
1874 else if (fsr & 0x04)
1875 info.si_code = FPE_FLTUND;
1876 else if (fsr & 0x02)
1877 info.si_code = FPE_FLTDIV;
1878 else if (fsr & 0x01)
1879 info.si_code = FPE_FLTRES;
1880 }
1881 force_sig_info(SIGFPE, &info, current);
1882 }
1883}
1884
1885void do_fpieee(struct pt_regs *regs)
1886{
1887 if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
1888 0, 0x24, SIGFPE) == NOTIFY_STOP)
1889 return;
1890
1891 do_fpe_common(regs);
1892}
1893
1894extern int do_mathemu(struct pt_regs *, struct fpustate *);
1895
1896void do_fpother(struct pt_regs *regs)
1897{
1898 struct fpustate *f = FPUSTATE;
1899 int ret = 0;
1900
1901 if (notify_die(DIE_TRAP, "fpu exception other", regs,
1902 0, 0x25, SIGFPE) == NOTIFY_STOP)
1903 return;
1904
1905 switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
1906 case (2 << 14): /* unfinished_FPop */
1907 case (3 << 14): /* unimplemented_FPop */
1908 ret = do_mathemu(regs, f);
1909 break;
1910 }
1911 if (ret)
1912 return;
1913 do_fpe_common(regs);
1914}
1915
1916void do_tof(struct pt_regs *regs)
1917{
1918 siginfo_t info;
1919
1920 if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
1921 0, 0x26, SIGEMT) == NOTIFY_STOP)
1922 return;
1923
1924 if (regs->tstate & TSTATE_PRIV)
1925 die_if_kernel("Penguin overflow trap from kernel mode", regs);
1926 if (test_thread_flag(TIF_32BIT)) {
1927 regs->tpc &= 0xffffffff;
1928 regs->tnpc &= 0xffffffff;
1929 }
1930 info.si_signo = SIGEMT;
1931 info.si_errno = 0;
1932 info.si_code = EMT_TAGOVF;
1933 info.si_addr = (void __user *)regs->tpc;
1934 info.si_trapno = 0;
1935 force_sig_info(SIGEMT, &info, current);
1936}
1937
1938void do_div0(struct pt_regs *regs)
1939{
1940 siginfo_t info;
1941
1942 if (notify_die(DIE_TRAP, "integer division by zero", regs,
1943 0, 0x28, SIGFPE) == NOTIFY_STOP)
1944 return;
1945
1946 if (regs->tstate & TSTATE_PRIV)
1947 die_if_kernel("TL0: Kernel divide by zero.", regs);
1948 if (test_thread_flag(TIF_32BIT)) {
1949 regs->tpc &= 0xffffffff;
1950 regs->tnpc &= 0xffffffff;
1951 }
1952 info.si_signo = SIGFPE;
1953 info.si_errno = 0;
1954 info.si_code = FPE_INTDIV;
1955 info.si_addr = (void __user *)regs->tpc;
1956 info.si_trapno = 0;
1957 force_sig_info(SIGFPE, &info, current);
1958}
1959
1960void instruction_dump (unsigned int *pc)
1961{
1962 int i;
1963
1964 if ((((unsigned long) pc) & 3))
1965 return;
1966
1967 printk("Instruction DUMP:");
1968 for (i = -3; i < 6; i++)
1969 printk("%c%08x%c",i?' ':'<',pc[i],i?' ':'>');
1970 printk("\n");
1971}
1972
1973static void user_instruction_dump (unsigned int __user *pc)
1974{
1975 int i;
1976 unsigned int buf[9];
1977
1978 if ((((unsigned long) pc) & 3))
1979 return;
1980
1981 if (copy_from_user(buf, pc - 3, sizeof(buf)))
1982 return;
1983
1984 printk("Instruction DUMP:");
1985 for (i = 0; i < 9; i++)
1986 printk("%c%08x%c", i == 3 ? '<' : ' ', buf[i], i == 3 ? '>' : ' ');
1987 printk("\n");
1988}
1989
1990void show_stack(struct task_struct *tsk, unsigned long *_ksp)
1991{
1992 unsigned long pc, fp, thread_base, ksp;
ee3eea16 1993 void *tp = task_stack_page(tsk);
1994 struct reg_window *rw;
1995 int count = 0;
1996
1997 ksp = (unsigned long) _ksp;
1998
1999 if (tp == current_thread_info())
2000 flushw_all();
2001
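	/* A 64-bit sparc %sp is biased by STACK_BIAS (2047), so the
	 * actual register window lives at %sp + STACK_BIAS.
	 */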
2002 fp = ksp + STACK_BIAS;
2003 thread_base = (unsigned long) tp;
2004
2005 printk("Call Trace:");
2006#ifdef CONFIG_KALLSYMS
2007 printk("\n");
2008#endif
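	/* Each register window saves the return address in %i7
	 * (ins[7]) and the caller's frame pointer in %i6 (ins[6]).
	 */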
2009 do {
2010 /* Bogus frame pointer? */
2011 if (fp < (thread_base + sizeof(struct thread_info)) ||
2012 fp >= (thread_base + THREAD_SIZE))
2013 break;
2014 rw = (struct reg_window *)fp;
2015 pc = rw->ins[7];
2016 printk(" [%016lx] ", pc);
2017 print_symbol("%s\n", pc);
2018 fp = rw->ins[6] + STACK_BIAS;
2019 } while (++count < 16);
2020#ifndef CONFIG_KALLSYMS
2021 printk("\n");
2022#endif
2023}
2024
2025void dump_stack(void)
2026{
2027 unsigned long *ksp;
2028
2029 __asm__ __volatile__("mov %%fp, %0"
2030 : "=r" (ksp));
2031 show_stack(current, ksp);
2032}
2033
2034EXPORT_SYMBOL(dump_stack);
2035
2036static inline int is_kernel_stack(struct task_struct *task,
2037 struct reg_window *rw)
2038{
2039 unsigned long rw_addr = (unsigned long) rw;
2040 unsigned long thread_base, thread_end;
2041
2042 if (rw_addr < PAGE_OFFSET) {
2043 if (task != &init_task)
2044 return 0;
2045 }
2046
ee3eea16 2047 thread_base = (unsigned long) task_stack_page(task);
2048 thread_end = thread_base + sizeof(union thread_union);
2049 if (rw_addr >= thread_base &&
2050 rw_addr < thread_end &&
2051 !(rw_addr & 0x7UL))
2052 return 1;
2053
2054 return 0;
2055}
2056
2057static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
2058{
2059 unsigned long fp = rw->ins[6];
2060
2061 if (!fp)
2062 return NULL;
2063
2064 return (struct reg_window *) (fp + STACK_BIAS);
2065}
2066
2067void die_if_kernel(char *str, struct pt_regs *regs)
2068{
2069 static int die_counter;
2070 extern void __show_regs(struct pt_regs * regs);
2071 extern void smp_report_regs(void);
2072 int count = 0;
2073
2074 /* Amuse the user. */
2075 printk(
2076" \\|/ ____ \\|/\n"
2077" \"@'/ .. \\`@\"\n"
2078" /_| \\__/ |_\\\n"
2079" \\__U_/\n");
2080
2081 printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter);
2082 notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
2083 __asm__ __volatile__("flushw");
2084 __show_regs(regs);
2085 if (regs->tstate & TSTATE_PRIV) {
2086 struct reg_window *rw = (struct reg_window *)
2087 (regs->u_regs[UREG_FP] + STACK_BIAS);
2088
2089 /* Stop the back trace when we hit userland or we
2090 * find some badly aligned kernel stack.
2091 */
2092 while (rw &&
2093 count++ < 30 &&
2094 is_kernel_stack(current, rw)) {
2095 printk("Caller[%016lx]", rw->ins[7]);
2096 print_symbol(": %s", rw->ins[7]);
2097 printk("\n");
2098
2099 rw = kernel_stack_up(rw);
2100 }
2101 instruction_dump ((unsigned int *) regs->tpc);
2102 } else {
2103 if (test_thread_flag(TIF_32BIT)) {
2104 regs->tpc &= 0xffffffff;
2105 regs->tnpc &= 0xffffffff;
2106 }
2107 user_instruction_dump ((unsigned int __user *) regs->tpc);
2108 }
2109#ifdef CONFIG_SMP
2110 smp_report_regs();
2111#endif
2112
2113 if (regs->tstate & TSTATE_PRIV)
2114 do_exit(SIGKILL);
2115 do_exit(SIGSEGV);
2116}
2117
2118extern int handle_popc(u32 insn, struct pt_regs *regs);
2119extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
2120
2121void do_illegal_instruction(struct pt_regs *regs)
2122{
2123 unsigned long pc = regs->tpc;
2124 unsigned long tstate = regs->tstate;
2125 u32 insn;
2126 siginfo_t info;
2127
2128 if (notify_die(DIE_TRAP, "illegal instruction", regs,
2129 0, 0x10, SIGILL) == NOTIFY_STOP)
2130 return;
2131
2132 if (tstate & TSTATE_PRIV)
2133 die_if_kernel("Kernel illegal instruction", regs);
2134 if (test_thread_flag(TIF_32BIT))
2135 pc = (u32)pc;
2136 if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
2137 if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
2138 if (handle_popc(insn, regs))
2139 return;
2140 } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
2141 if (handle_ldf_stq(insn, regs))
2142 return;
2143 }
2144 }
2145 info.si_signo = SIGILL;
2146 info.si_errno = 0;
2147 info.si_code = ILL_ILLOPC;
2148 info.si_addr = (void __user *)pc;
2149 info.si_trapno = 0;
2150 force_sig_info(SIGILL, &info, current);
2151}
2152
2153void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
2154{
2155 siginfo_t info;
2156
2157 if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2158 0, 0x34, SIGSEGV) == NOTIFY_STOP)
2159 return;
2160
2161 if (regs->tstate & TSTATE_PRIV) {
2162 extern void kernel_unaligned_trap(struct pt_regs *regs,
2163 unsigned int insn,
2164 unsigned long sfar,
2165 unsigned long sfsr);
2166
2167 kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc),
2168 sfar, sfsr);
2169 return;
2170 }
2171 info.si_signo = SIGBUS;
2172 info.si_errno = 0;
2173 info.si_code = BUS_ADRALN;
2174 info.si_addr = (void __user *)sfar;
2175 info.si_trapno = 0;
2176 force_sig_info(SIGBUS, &info, current);
2177}
2178
2179void do_privop(struct pt_regs *regs)
2180{
2181 siginfo_t info;
2182
2183 if (notify_die(DIE_TRAP, "privileged operation", regs,
2184 0, 0x11, SIGILL) == NOTIFY_STOP)
2185 return;
2186
2187 if (test_thread_flag(TIF_32BIT)) {
2188 regs->tpc &= 0xffffffff;
2189 regs->tnpc &= 0xffffffff;
2190 }
2191 info.si_signo = SIGILL;
2192 info.si_errno = 0;
2193 info.si_code = ILL_PRVOPC;
2194 info.si_addr = (void __user *)regs->tpc;
2195 info.si_trapno = 0;
2196 force_sig_info(SIGILL, &info, current);
2197}
2198
2199void do_privact(struct pt_regs *regs)
2200{
2201 do_privop(regs);
2202}
2203
2204/* Trap level 1 stuff or other traps we should never see... */
2205void do_cee(struct pt_regs *regs)
2206{
2207 die_if_kernel("TL0: Cache Error Exception", regs);
2208}
2209
2210void do_cee_tl1(struct pt_regs *regs)
2211{
2212 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2213 die_if_kernel("TL1: Cache Error Exception", regs);
2214}
2215
2216void do_dae_tl1(struct pt_regs *regs)
2217{
2218 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2219 die_if_kernel("TL1: Data Access Exception", regs);
2220}
2221
2222void do_iae_tl1(struct pt_regs *regs)
2223{
2224 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2225 die_if_kernel("TL1: Instruction Access Exception", regs);
2226}
2227
2228void do_div0_tl1(struct pt_regs *regs)
2229{
2230 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2231 die_if_kernel("TL1: DIV0 Exception", regs);
2232}
2233
2234void do_fpdis_tl1(struct pt_regs *regs)
2235{
2236 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2237 die_if_kernel("TL1: FPU Disabled", regs);
2238}
2239
2240void do_fpieee_tl1(struct pt_regs *regs)
2241{
2242 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2243 die_if_kernel("TL1: FPU IEEE Exception", regs);
2244}
2245
2246void do_fpother_tl1(struct pt_regs *regs)
2247{
2248 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2249 die_if_kernel("TL1: FPU Other Exception", regs);
2250}
2251
2252void do_ill_tl1(struct pt_regs *regs)
2253{
2254 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2255 die_if_kernel("TL1: Illegal Instruction Exception", regs);
2256}
2257
2258void do_irq_tl1(struct pt_regs *regs)
2259{
2260 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2261 die_if_kernel("TL1: IRQ Exception", regs);
2262}
2263
2264void do_lddfmna_tl1(struct pt_regs *regs)
2265{
2266 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2267 die_if_kernel("TL1: LDDF Exception", regs);
2268}
2269
2270void do_stdfmna_tl1(struct pt_regs *regs)
2271{
2272 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2273 die_if_kernel("TL1: STDF Exception", regs);
2274}
2275
2276void do_paw(struct pt_regs *regs)
2277{
2278 die_if_kernel("TL0: Phys Watchpoint Exception", regs);
2279}
2280
2281void do_paw_tl1(struct pt_regs *regs)
2282{
2283 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2284 die_if_kernel("TL1: Phys Watchpoint Exception", regs);
2285}
2286
2287void do_vaw(struct pt_regs *regs)
2288{
2289 die_if_kernel("TL0: Virt Watchpoint Exception", regs);
2290}
2291
2292void do_vaw_tl1(struct pt_regs *regs)
2293{
2294 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2295 die_if_kernel("TL1: Virt Watchpoint Exception", regs);
2296}
2297
2298void do_tof_tl1(struct pt_regs *regs)
2299{
2300 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2301 die_if_kernel("TL1: Tag Overflow Exception", regs);
2302}
2303
2304void do_getpsr(struct pt_regs *regs)
2305{
2306 regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
2307 regs->tpc = regs->tnpc;
2308 regs->tnpc += 4;
2309 if (test_thread_flag(TIF_32BIT)) {
2310 regs->tpc &= 0xffffffff;
2311 regs->tnpc &= 0xffffffff;
2312 }
2313}
2314
2315struct trap_per_cpu trap_block[NR_CPUS];
2316
2317/* This can get invoked before sched_init() so play it super safe
2318 * and use hard_smp_processor_id().
2319 */
2320void init_cur_cpu_trap(void)
2321{
2322 int cpu = hard_smp_processor_id();
2323 struct trap_per_cpu *p = &trap_block[cpu];
2324
2325 p->thread = current_thread_info();
2326 p->pgd_paddr = 0;
2327}
2328
1da177e4 2329extern void thread_info_offsets_are_bolixed_dave(void);
56fb4df6 2330extern void trap_per_cpu_offsets_are_bolixed_dave(void);
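
/* Neither function above is defined anywhere.  If one of the offset
 * checks below fails, the call survives to link time and the
 * undefined reference breaks the build, turning a struct layout
 * mismatch into a build-time error.
 */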
2331
2332/* Only invoked on boot processor. */
2333void __init trap_init(void)
2334{
2335 /* Compile time sanity check. */
2336 if (TI_TASK != offsetof(struct thread_info, task) ||
2337 TI_FLAGS != offsetof(struct thread_info, flags) ||
2338 TI_CPU != offsetof(struct thread_info, cpu) ||
2339 TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
2340 TI_KSP != offsetof(struct thread_info, ksp) ||
2341 TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
2342 TI_KREGS != offsetof(struct thread_info, kregs) ||
2343 TI_UTRAPS != offsetof(struct thread_info, utraps) ||
2344 TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
2345 TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
2346 TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
2347 TI_GSR != offsetof(struct thread_info, gsr) ||
2348 TI_XFSR != offsetof(struct thread_info, xfsr) ||
2349 TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
2350 TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
2351 TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
2352 TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
2353 TI_PCR != offsetof(struct thread_info, pcr_reg) ||
1da177e4 2354 TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
2355 TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
2356 TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
2357 TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
2358 TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
2359 TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
2360 TI_FPREGS != offsetof(struct thread_info, fpregs) ||
2361 (TI_FPREGS & (64 - 1)))
2362 thread_info_offsets_are_bolixed_dave();
2363
56fb4df6 2364 if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) ||
2365 (TRAP_PER_CPU_PGD_PADDR !=
2366 offsetof(struct trap_per_cpu, pgd_paddr)) ||
2367 (TRAP_PER_CPU_CPU_MONDO_PA !=
2368 offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
2369 (TRAP_PER_CPU_DEV_MONDO_PA !=
2370 offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
2371 (TRAP_PER_CPU_RESUM_MONDO_PA !=
2372 offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
2373 (TRAP_PER_CPU_RESUM_KBUF_PA !=
2374 offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
2375 (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
2376 offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
2377 (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
2378 offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
2379 (TRAP_PER_CPU_FAULT_INFO !=
2380 offsetof(struct trap_per_cpu, fault_info)))
2381 trap_per_cpu_offsets_are_bolixed_dave();
2382
2383 /* Attach to the address space of init_task. On SMP we
2384 * do this in smp.c:smp_callin for other cpus.
2385 */
2386 atomic_inc(&init_mm.mm_count);
2387 current->active_mm = &init_mm;
2388}