/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
 * Copyright (C) 1995, 1996 Paul M. Antoine
 * Copyright (C) 1998 Ulf Carlsson
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000, 01 MIPS Technologies, Inc.
 * Copyright (C) 2002, 2003, 2004, 2005 Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>

#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/watch.h>
#include <asm/types.h>
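
/*
 * Low-level exception entry points (assembly stubs).  trap_init() below
 * installs them into the exception vector table via set_except_vector().
 */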
extern asmlinkage void handle_int(void);
extern asmlinkage void handle_tlbm(void);
extern asmlinkage void handle_tlbl(void);
extern asmlinkage void handle_tlbs(void);
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
	struct mips_fpu_struct *ctx);
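
/*
 * Board-specific hooks: platform setup code may point these at its own
 * bus error, NMI, EJTAG and EIC interrupt-binding routines.
 */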
void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
/*
 * This constant is for searching for possible module text segments.
 * MODULE_RANGE is a guess of how much space is likely to be vmalloced.
 */
#define MODULE_RANGE (8*1024*1024)
static void show_trace(unsigned long *stack)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long addr;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	while (!kstack_end(stack)) {
		addr = *stack++;
		if (__kernel_text_address(addr)) {
			printk(" [<%0*lx>] ", field, addr);
			print_symbol("%s\n", addr);
		}
	}
	printk("\n");
}
/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */
void show_stack(struct task_struct *task, unsigned long *sp)
{
	const int field = 2 * sizeof(unsigned long);
	long stackdata;
	int i;
	unsigned long *stack;

	if (!sp) {
		if (task && task != current)
			sp = (unsigned long *) task->thread.reg29;
		else
			sp = (unsigned long *) &sp;
	}

	printk("Stack :");
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0))
			printk("\n       ");

		if (__get_user(stackdata, sp++)) {
			printk(" (Bad stack address)");
			break;
		}

		printk(" %0*lx", field, stackdata);
		i++;
	}
	printk("\n");
}
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	unsigned long stack;

	show_trace(&stack);
}

EXPORT_SYMBOL(dump_stack);
void show_code(unsigned int *pc)
{
	long i;

	printk("\nCode:");

	for (i = -3 ; i < 6 ; i++) {
		unsigned int insn;
		if (__get_user(insn, pc + i)) {
			printk(" (Bad address in epc)\n");
			break;
		}
		printk("%c%08x%c", (i ? ' ' : '<'), insn, (i ? ' ' : '>'));
	}
}
void show_regs(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int cause = regs->cp0_cause;
	int i;

	printk("Cpu %d\n", smp_processor_id());

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d   :", i);
		if (i == 0)
			printk(" %0*lx", field, 0UL);
		else if (i == 26 || i == 27)
			printk(" %*s", field, "");
		else
			printk(" %0*lx", field, regs->regs[i]);

		i++;
		if ((i % 4) == 0)
			printk("\n");
	}

	printk("Hi    : %0*lx\n", field, regs->hi);
	printk("Lo    : %0*lx\n", field, regs->lo);

	/*
	 * Saved cp0 registers
	 */
	printk("epc   : %0*lx ", field, regs->cp0_epc);
	print_symbol("%s ", regs->cp0_epc);
	printk("    %s\n", print_tainted());
	printk("ra    : %0*lx ", field, regs->regs[31]);
	print_symbol("%s\n", regs->regs[31]);

	printk("Status: %08x    ", (uint32_t) regs->cp0_status);

	if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
		if (regs->cp0_status & ST0_KUO)
			printk("KUo ");
		if (regs->cp0_status & ST0_IEO)
			printk("IEo ");
		if (regs->cp0_status & ST0_KUP)
			printk("KUp ");
		if (regs->cp0_status & ST0_IEP)
			printk("IEp ");
		if (regs->cp0_status & ST0_KUC)
			printk("KUc ");
		if (regs->cp0_status & ST0_IEC)
			printk("IEc ");
	} else {
		if (regs->cp0_status & ST0_KX)
			printk("KX ");
		if (regs->cp0_status & ST0_SX)
			printk("SX ");
		if (regs->cp0_status & ST0_UX)
			printk("UX ");
		switch (regs->cp0_status & ST0_KSU) {
		case KSU_USER:
			printk("USER ");
			break;
		case KSU_SUPERVISOR:
			printk("SUPERVISOR ");
			break;
		case KSU_KERNEL:
			printk("KERNEL ");
			break;
		default:
			printk("BAD_MODE ");
			break;
		}
		if (regs->cp0_status & ST0_ERL)
			printk("ERL ");
		if (regs->cp0_status & ST0_EXL)
			printk("EXL ");
		if (regs->cp0_status & ST0_IE)
			printk("IE ");
	}
	printk("\n");

	printk("Cause : %08x\n", cause);

	cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	if (1 <= cause && cause <= 5)
		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

	printk("PrId  : %08x\n", read_c0_prid());
}
void show_registers(struct pt_regs *regs)
{
	show_regs(regs);
	printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
	       current->comm, current->pid, current_thread_info(), current);
	show_stack(current, (long *) regs->regs[29]);
	show_code((unsigned int *) regs->cp0_epc);
	printk("\n");
}
static DEFINE_SPINLOCK(die_lock);

NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs)
{
	static int die_counter;
#ifdef CONFIG_MIPS_MT_SMTC
	unsigned long dvpret = dvpe();
#endif /* CONFIG_MIPS_MT_SMTC */

	spin_lock_irq(&die_lock);
#ifdef CONFIG_MIPS_MT_SMTC
	mips_mt_regdump(dvpret);
#endif /* CONFIG_MIPS_MT_SMTC */
	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	spin_unlock_irq(&die_lock);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
		ssleep(5);
		panic("Fatal exception");
	}

	do_exit(SIGSEGV);
}
extern const struct exception_table_entry __start___dbe_table[];
extern const struct exception_table_entry __stop___dbe_table[];

void __declare_dbe_table(void)
{
	__asm__ __volatile__(
	".section\t__dbe_table,\"a\"\n\t"
	".previous"
	);
}
/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
	if (!e)
		e = search_module_dbetables(addr);
	return e;
}
asmlinkage void do_be(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	const struct exception_table_entry *fixup = NULL;
	int data = regs->cp0_cause & 4;
	int action = MIPS_BE_FATAL;

	/* XXX For now.  Fixme, this searches the wrong table ...  */
	if (data && !user_mode(regs))
		fixup = search_dbe_tables(exception_epc(regs));

	if (fixup)
		action = MIPS_BE_FIXUP;

	if (board_be_handler)
		action = board_be_handler(regs, fixup != 0);

	switch (action) {
	case MIPS_BE_DISCARD:
		return;
	case MIPS_BE_FIXUP:
		if (fixup) {
			regs->cp0_epc = fixup->nextinsn;
			return;
		}
		break;
	default:
		break;
	}

	/*
	 * Assume it would be too dangerous to continue ...
	 */
	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
	       data ? "Data" : "Instruction",
	       field, regs->cp0_epc, field, regs->regs[31]);
	die_if_kernel("Oops", regs);
	force_sig(SIGBUS, current);
}
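
/*
 * get_insn_opcode() below fetches the instruction word that caused an
 * exception: if Cause.BD is set the fault happened in a branch delay slot,
 * so the faulting instruction lives at EPC + 4 rather than at EPC.
 */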
static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode)
{
	unsigned int __user *epc;

	epc = (unsigned int __user *) regs->cp0_epc +
	      ((regs->cp0_cause & CAUSEF_BD) != 0);
	if (!get_user(*opcode, epc))
		return 0;

	force_sig(SIGSEGV, current);

	return 1;
}
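
/*
 * Instruction-field masks used to decode the ll/sc and rdhwr instructions
 * emulated below: OPCODE/BASE/RT/OFFSET select the major opcode, base
 * register, target register and 16-bit offset; SPEC3/RD/FUNC/RDHWR identify
 * the rdhwr encoding.
 */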
#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define RDHWR  0x0000003b
/*
 * The ll_bit is cleared by r*_switch.S
 */

unsigned long ll_bit;

static struct task_struct *ll_task = NULL;
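
/*
 * simulate_ll()/simulate_sc() emulate the ll/sc pair for CPUs that trap on
 * them: ll_bit records an outstanding load-linked and ll_task the thread
 * that issued it, so a later sc only performs its store (and writes 1 into
 * its target register) while the link is still held by the current task.
 */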
static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long value, __user *vaddr;
	long offset;
	int signal = 0;

	/*
	 * analyse the ll instruction that just caused a ri exception
	 * and put the referenced address to addr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
	        ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

	if ((unsigned long)vaddr & 3) {
		signal = SIGBUS;
		goto sig;
	}
	if (get_user(value, vaddr)) {
		signal = SIGSEGV;
		goto sig;
	}

	if (ll_task == NULL || ll_task == current) {
		ll_bit = 1;
	} else {
		ll_bit = 0;
	}
	ll_task = current;

	compute_return_epc(regs);

	regs->regs[(opcode & RT) >> 16] = value;

	return;

sig:
	force_sig(signal, current);
}
static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long __user *vaddr;
	unsigned long reg;
	long offset;
	int signal = 0;

	/*
	 * analyse the sc instruction that just caused a ri exception
	 * and put the referenced address to addr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
	        ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
	reg = (opcode & RT) >> 16;

	if ((unsigned long)vaddr & 3) {
		signal = SIGBUS;
		goto sig;
	}

	if (ll_bit == 0 || ll_task != current) {
		compute_return_epc(regs);
		regs->regs[reg] = 0;
		return;
	}

	if (put_user(regs->regs[reg], vaddr)) {
		signal = SIGSEGV;
		goto sig;
	}

	compute_return_epc(regs);
	regs->regs[reg] = 1;

	return;

sig:
	force_sig(signal, current);
}
/*
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is both
 * opcodes are supposed to result in coprocessor unusable exceptions if
 * executed on ll/sc-less processors.  That's the theory.  In practice a
 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 * instead, so we're doing the emulation thing in both exception handlers.
 */
static inline int simulate_llsc(struct pt_regs *regs)
{
	unsigned int opcode;

	if (unlikely(get_insn_opcode(regs, &opcode)))
		return -EFAULT;

	if ((opcode & OPCODE) == LL) {
		simulate_ll(regs, opcode);
		return 0;
	}
	if ((opcode & OPCODE) == SC) {
		simulate_sc(regs, opcode);
		return 0;
	}

	return -EFAULT;			/* Strange things going on ... */
}
/*
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware.  The only current use of this
 * is the thread area pointer.
 */
static inline int simulate_rdhwr(struct pt_regs *regs)
{
	struct thread_info *ti = task_thread_info(current);
	unsigned int opcode;

	if (unlikely(get_insn_opcode(regs, &opcode)))
		return -EFAULT;

	if (unlikely(compute_return_epc(regs)))
		return -EFAULT;

	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
		int rd = (opcode & RD) >> 11;
		int rt = (opcode & RT) >> 16;
		switch (rd) {
		case 29:
			regs->regs[rt] = ti->tp_value;
			return 0;
		default:
			return -EFAULT;
		}
	}

	/* Not ours.  */
	return -EFAULT;
}
asmlinkage void do_ov(struct pt_regs *regs)
{
	siginfo_t info;

	die_if_kernel("Integer overflow", regs);

	info.si_code = FPE_INTOVF;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *) regs->cp0_epc;
	force_sig_info(SIGFPE, &info, current);
}
/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
	die_if_kernel("FP exception in kernel code", regs);

	if (fcr31 & FPU_CSR_UNI_X) {
		int sig;

#ifdef CONFIG_PREEMPT
		if (!is_fpu_owner()) {
			/* We might lose fpu before disabling preempt... */
			own_fpu();
			BUG_ON(!used_math());
			restore_fp(current);
		}
#endif
		/*
		 * Unimplemented operation exception.  If we've got the full
		 * software emulator on-board, let's use it...
		 *
		 * Force FPU to dump state into task/thread context.  We're
		 * moving a lot of data here for what is probably a single
		 * instruction, but the alternative is to pre-decode the FP
		 * register operands before invoking the emulator, which seems
		 * a bit extreme for what should be an infrequent event.
		 */
		save_fp(current);
		/* Ensure 'resume' doesn't overwrite saved fp context again. */
		lose_fpu();

		/* Run the emulator */
		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu);

		own_fpu();	/* Using the FPU again.  */
		/*
		 * We can't allow the emulated instruction to leave any of
		 * the cause bit set in $fcr31.
		 */
		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

		/* Restore the hardware register state */
		restore_fp(current);

		/* If something went wrong, signal */
		if (sig)
			force_sig(sig, current);

		return;
	}

	force_sig(SIGFPE, current);
}
asmlinkage void do_bp(struct pt_regs *regs)
{
	unsigned int opcode, bcode;
	siginfo_t info;

	die_if_kernel("Break instruction in kernel code", regs);

	if (get_insn_opcode(regs, &opcode))
		return;

	/*
	 * There is the ancient bug in the MIPS assemblers that the break
	 * code starts left to bit 16 instead to bit 6 in the opcode.
	 * Gas is bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristic.  --macro
	 */
	bcode = ((opcode >> 6) & ((1 << 20) - 1));
	if (bcode < (1 << 10))
		bcode <<= 10;

	/*
	 * (A short test says that IRIX 5.3 sends SIGTRAP for all break
	 * insns, even for break codes that indicate arithmetic failures.
	 * Weird ...)
	 * But should we continue the brokenness???  --macro
	 */
	switch (bcode) {
	case BRK_OVERFLOW << 10:
	case BRK_DIVZERO << 10:
		if (bcode == (BRK_DIVZERO << 10))
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void __user *) regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	default:
		force_sig(SIGTRAP, current);
	}
}
asmlinkage void do_tr(struct pt_regs *regs)
{
	unsigned int opcode, tcode = 0;
	siginfo_t info;

	die_if_kernel("Trap instruction in kernel code", regs);

	if (get_insn_opcode(regs, &opcode))
		return;

	/* Immediate versions don't provide a code.  */
	if (!(opcode & OPCODE))
		tcode = ((opcode >> 6) & ((1 << 10) - 1));

	/*
	 * (A short test says that IRIX 5.3 sends SIGTRAP for all trap
	 * insns, even for trap codes that indicate arithmetic failures.
	 * Weird ...)
	 * But should we continue the brokenness???  --macro
	 */
	switch (tcode) {
	case BRK_OVERFLOW:
	case BRK_DIVZERO:
		if (tcode == BRK_DIVZERO)
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void __user *) regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	default:
		force_sig(SIGTRAP, current);
	}
}
asmlinkage void do_ri(struct pt_regs *regs)
{
	die_if_kernel("Reserved instruction in kernel code", regs);

	if (!simulate_llsc(regs))
		return;

	if (!simulate_rdhwr(regs))
		return;

	force_sig(SIGILL, current);
}
asmlinkage void do_cpu(struct pt_regs *regs)
{
	unsigned int cpid;

	die_if_kernel("do_cpu invoked from kernel context!", regs);

	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

	switch (cpid) {
	case 0:
		if (!simulate_llsc(regs))
			return;

		if (!simulate_rdhwr(regs))
			return;

		break;

	case 1:
		own_fpu();
		if (used_math()) {	/* Using the FPU again.  */
			restore_fp(current);
		} else {			/* First time FPU user.  */
			init_fpu();
			set_used_math();
		}

		if (!cpu_has_fpu) {
			int sig = fpu_emulator_cop1Handler(regs,
						&current->thread.fpu);
			if (sig)
				force_sig(sig, current);
#ifdef CONFIG_MIPS_MT_FPAFF
			else {
				/*
				 * MIPS MT processors may have fewer FPU contexts
				 * than CPU threads. If we've emulated more than
				 * some threshold number of instructions, force
				 * migration to a "CPU" that has FP support.
				 */
				if (mt_fpemul_threshold > 0
				    && ((current->thread.emulated_fp++
				         > mt_fpemul_threshold))) {
					/*
					 * If there's no FPU present, or if the
					 * application has already restricted
					 * the allowed set to exclude any CPUs
					 * with FPUs, we'll skip the procedure.
					 */
					if (cpus_intersects(current->cpus_allowed,
					                    mt_fpu_cpumask)) {
						cpumask_t tmask;

						cpus_and(tmask,
						         current->thread.user_cpus_allowed,
						         mt_fpu_cpumask);
						set_cpus_allowed(current, tmask);
						current->thread.mflags |= MF_FPUBOUND;
					}
				}
			}
#endif /* CONFIG_MIPS_MT_FPAFF */
		}

		return;

	case 2:
	case 3:
		die_if_kernel("do_cpu invoked from kernel context!", regs);
		break;
	}

	force_sig(SIGILL, current);
}
asmlinkage void do_mdmx(struct pt_regs *regs)
{
	force_sig(SIGILL, current);
}
asmlinkage void do_watch(struct pt_regs *regs)
{
	/*
	 * We use the watch exception where available to detect stack
	 * overflows.
	 */
	panic("Caught WATCH exception - probably caused by stack overflow.");
}
asmlinkage void do_mcheck(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	int multi_match = regs->cp0_status & ST0_TS;

	show_regs(regs);

	if (multi_match) {
		printk("Index   : %0x\n", read_c0_index());
		printk("Pagemask: %0x\n", read_c0_pagemask());
		printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
		printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
		printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
		printk("\n");
	}

	show_code((unsigned int *) regs->cp0_epc);

	/*
	 * Some chips may have other causes of machine check (e.g. SB1
	 * graduation timer)
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (multi_match) ? "" : "not ");
}
asmlinkage void do_mt(struct pt_regs *regs)
{
	int subcode;

	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
			>> VPECONTROL_EXCPT_SHIFT;
	switch (subcode) {
	case 0:
		printk(KERN_DEBUG "Thread Underflow\n");
		break;
	case 1:
		printk(KERN_DEBUG "Thread Overflow\n");
		break;
	case 2:
		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
		break;
	case 3:
		printk(KERN_DEBUG "Gating Storage Exception\n");
		break;
	case 4:
		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
		break;
	case 5:
		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
		break;
	default:
		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
			subcode);
	}
	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	force_sig(SIGILL, current);
}
asmlinkage void do_dsp(struct pt_regs *regs)
{
	if (cpu_has_dsp)
		panic("Unexpected DSP exception\n");

	force_sig(SIGILL, current);
}
asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - no way to handle this if it ever occurs.  Most probably
	 * caused by a new unknown cpu type or after another deadly
	 * hard/software error.
	 */
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
}
asmlinkage void do_default_vi(struct pt_regs *regs)
{
	panic("Caught unexpected vectored interrupt.");
}
/*
 * Some MIPS CPUs can enable/disable for cache parity detection, but do
 * it in different ways.
 */
static inline void parity_protection_init(void)
{
	switch (current_cpu_data.cputype) {
	case CPU_24K:
	case CPU_34K:
	case CPU_5KC:
		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}
asmlinkage void cache_parity_error(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	printk("Error bits: %s%s%s%s%s%s%s\n",
	       reg_val & (1<<29) ? "ED " : "",
	       reg_val & (1<<28) ? "ET " : "",
	       reg_val & (1<<26) ? "EE " : "",
	       reg_val & (1<<25) ? "EB " : "",
	       reg_val & (1<<24) ? "EI " : "",
	       reg_val & (1<<23) ? "E1 " : "",
	       reg_val & (1<<22) ? "E0 " : "");
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
}
/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long depc, old_epc;
	unsigned int debug;

	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC). EPC is restored after the
		 * calculation.
		 */
		old_epc = regs->cp0_epc;
		regs->cp0_epc = depc;
		__compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
	} else
		depc += 4;
	write_c0_depc(depc);

#if 0
	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
#endif
}
/*
 * NMI exception handler.
 */
void nmi_exception_handler(struct pt_regs *regs)
{
#ifdef CONFIG_MIPS_MT_SMTC
	unsigned long dvpret = dvpe();
	printk("NMI taken!!!!\n");
	mips_mt_regdump(dvpret);
#else
	printk("NMI taken!!!!\n");
#endif /* CONFIG_MIPS_MT_SMTC */
	die("NMI", regs);
	while (1) ;
}
#define VECTORSPACING 0x100	/* for EI/VI mode */

unsigned long ebase;
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];

/*
 * As a side effect of the way this is implemented we're limited
 * to interrupt handlers in the address range from
 * KSEG0 <= x < KSEG0 + 256mb on the Nevada.  Oh well ...
 */
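/*
 * For exception 0 on CPUs with a dedicated interrupt vector, the word
 * written at ebase + 0x200 below encodes a MIPS 'j' instruction (opcode
 * 0x08000000) whose 26-bit target field holds handler >> 2, which is why
 * the handler must live within the 256MB segment noted above.
 */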
void *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler = exception_handlers[n];

	exception_handlers[n] = handler;
	if (n == 0 && cpu_has_divec) {
		*(volatile u32 *)(ebase + 0x200) = 0x08000000 |
		                                   (0x03ffffff & (handler >> 2));
		flush_icache_range(ebase + 0x200, ebase + 0x204);
	}
	return (void *)old_handler;
}
#ifdef CONFIG_CPU_MIPSR2_SRS
/*
 * MIPSR2 shadow register set allocation
 */

static struct shadow_registers {
	/*
	 * Number of shadow register sets supported
	 */
	unsigned long sr_supported;
	/*
	 * Bitmap of allocated shadow registers
	 */
	unsigned long sr_allocated;
} shadow_registers;
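
/*
 * mips_srs_init() reads the HSS field (bits 29:26) of the SRSCtl register;
 * the number of implemented shadow register sets is HSS + 1, and set 0 is
 * reserved for normal kernel use.
 */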
static void mips_srs_init(void)
{
	shadow_registers.sr_supported = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
	printk(KERN_INFO "%d MIPSR2 register sets available\n",
	       shadow_registers.sr_supported);
	shadow_registers.sr_allocated = 1;	/* Set 0 used by kernel */
}

int mips_srs_max(void)
{
	return shadow_registers.sr_supported;
}

int mips_srs_alloc(void)
{
	struct shadow_registers *sr = &shadow_registers;
	int set;

again:
	set = find_first_zero_bit(&sr->sr_allocated, sr->sr_supported);
	if (set >= sr->sr_supported)
		return -1;

	if (test_and_set_bit(set, &sr->sr_allocated))
		goto again;

	return set;
}

void mips_srs_free(int set)
{
	struct shadow_registers *sr = &shadow_registers;

	clear_bit(set, &sr->sr_allocated);
}
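
/*
 * set_vi_srs_handler() copies the except_vec_vi stub into the vector slot
 * for interrupt n and patches its lui/ori instruction pair with the high
 * and low halves of the handler address; when a nonzero shadow register
 * set is used it instead writes a direct 'j handler' instruction.
 */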
static void *set_vi_srs_handler(int n, void *addr, int srs)
{
	unsigned long handler;
	unsigned long old_handler = vi_handlers[n];
	u32 *w;
	unsigned char *b;

	if (!cpu_has_veic && !cpu_has_vint)
		BUG();

	if (addr == NULL) {
		handler = (unsigned long) do_default_vi;
		srs = 0;
	} else
		handler = (unsigned long) addr;
	vi_handlers[n] = (unsigned long) addr;

	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

	if (srs >= mips_srs_max())
		panic("Shadow register set %d not supported", srs);

	if (cpu_has_veic) {
		if (board_bind_eic_interrupt)
			board_bind_eic_interrupt (n, srs);
	} else if (cpu_has_vint) {
		/* SRSMap is only defined if shadow sets are implemented */
		if (mips_srs_max() > 1)
			change_c0_srsmap (0xf << n*4, srs << n*4);
	}

	if (srs == 0) {
		/*
		 * If no shadow set is selected then use the default handler
		 * that does normal register saving and a standard interrupt exit
		 */

		extern char except_vec_vi, except_vec_vi_lui;
		extern char except_vec_vi_ori, except_vec_vi_end;
#ifdef CONFIG_MIPS_MT_SMTC
		/*
		 * We need to provide the SMTC vectored interrupt handler
		 * not only with the address of the handler, but with the
		 * Status.IM bit to be masked before going there.
		 */
		extern char except_vec_vi_mori;
		const int mori_offset = &except_vec_vi_mori - &except_vec_vi;
#endif /* CONFIG_MIPS_MT_SMTC */
		const int handler_len = &except_vec_vi_end - &except_vec_vi;
		const int lui_offset = &except_vec_vi_lui - &except_vec_vi;
		const int ori_offset = &except_vec_vi_ori - &except_vec_vi;

		if (handler_len > VECTORSPACING) {
			/*
			 * Sigh... panicking won't help as the console
			 * is probably not configured :(
			 */
			panic ("VECTORSPACING too small");
		}

		memcpy (b, &except_vec_vi, handler_len);
#ifdef CONFIG_MIPS_MT_SMTC
		if (n > 7)
			printk("Vector index %d exceeds SMTC maximum\n", n);
		w = (u32 *)(b + mori_offset);
		*w = (*w & 0xffff0000) | (0x100 << n);
#endif /* CONFIG_MIPS_MT_SMTC */
		w = (u32 *)(b + lui_offset);
		*w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
		w = (u32 *)(b + ori_offset);
		*w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
		flush_icache_range((unsigned long)b, (unsigned long)(b+handler_len));
	}
	else {
		/*
		 * In other cases jump directly to the interrupt handler
		 *
		 * It is the handler's responsibility to save registers if
		 * required (e.g. hi/lo) and return from the exception using
		 * "eret".
		 */
		w = (u32 *)b;
		*w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */
		flush_icache_range((unsigned long)b, (unsigned long)(b+8));
	}

	return (void *)old_handler;
}
void *set_vi_handler(int n, void *addr)
{
	return set_vi_srs_handler(n, addr, 0);
}
#else

static inline void mips_srs_init(void)
{
}

#endif /* CONFIG_CPU_MIPSR2_SRS */
/*
 * This is used by native signal handling
 */
asmlinkage int (*save_fp_context)(struct sigcontext *sc);
asmlinkage int (*restore_fp_context)(struct sigcontext *sc);

extern asmlinkage int _save_fp_context(struct sigcontext *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext *sc);

extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc);
extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc);
#ifdef CONFIG_SMP
static int smp_save_fp_context(struct sigcontext *sc)
{
	return cpu_has_fpu
	       ? _save_fp_context(sc)
	       : fpu_emulator_save_context(sc);
}

static int smp_restore_fp_context(struct sigcontext *sc)
{
	return cpu_has_fpu
	       ? _restore_fp_context(sc)
	       : fpu_emulator_restore_context(sc);
}
#endif

static inline void signal_init(void)
{
#ifdef CONFIG_SMP
	/* For now just do the cpu_has_fpu check when the functions are invoked */
	save_fp_context = smp_save_fp_context;
	restore_fp_context = smp_restore_fp_context;
#else
	if (cpu_has_fpu) {
		save_fp_context = _save_fp_context;
		restore_fp_context = _restore_fp_context;
	} else {
		save_fp_context = fpu_emulator_save_context;
		restore_fp_context = fpu_emulator_restore_context;
	}
#endif
}
#ifdef CONFIG_MIPS32_COMPAT

/*
 * This is used by 32-bit signal stuff on the 64-bit kernel
 */
asmlinkage int (*save_fp_context32)(struct sigcontext32 *sc);
asmlinkage int (*restore_fp_context32)(struct sigcontext32 *sc);

extern asmlinkage int _save_fp_context32(struct sigcontext32 *sc);
extern asmlinkage int _restore_fp_context32(struct sigcontext32 *sc);

extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 *sc);
extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 *sc);

static inline void signal32_init(void)
{
	if (cpu_has_fpu) {
		save_fp_context32 = _save_fp_context32;
		restore_fp_context32 = _restore_fp_context32;
	} else {
		save_fp_context32 = fpu_emulator_save_context32;
		restore_fp_context32 = fpu_emulator_restore_context32;
	}
}
#endif
extern void cpu_cache_init(void);
extern void tlb_init(void);
extern void flush_tlb_handlers(void);

void __init per_cpu_trap_init(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int status_set = ST0_CU0;
#ifdef CONFIG_MIPS_MT_SMTC
	int secondaryTC = 0;
	int bootTC = (cpu == 0);

	/*
	 * Only do per_cpu_trap_init() for first TC of Each VPE.
	 * Note that this hack assumes that the SMTC init code
	 * assigns TCs consecutively and in ascending order.
	 */

	if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
	    ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
		secondaryTC = 1;
#endif /* CONFIG_MIPS_MT_SMTC */

	/*
	 * Disable coprocessors and select 32-bit or 64-bit addressing
	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
	 * flag that some firmware may have left set and the TS bit (for
	 * IP27).  Set XX for ISA IV code to work.
	 */
#ifdef CONFIG_64BIT
	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
	if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
		status_set |= ST0_XX;
	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
			 status_set);

	if (cpu_has_dsp)
		set_c0_status(ST0_MX);

#ifdef CONFIG_CPU_MIPSR2
	write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */
#endif

#ifdef CONFIG_MIPS_MT_SMTC
	if (!secondaryTC) {
#endif /* CONFIG_MIPS_MT_SMTC */

	/*
	 * Interrupt handling.
	 */
	if (cpu_has_veic || cpu_has_vint) {
		write_c0_ebase (ebase);
		/* Setting vector spacing enables EI/VI mode */
		change_c0_intctl (0x3e0, VECTORSPACING);
	}
	if (cpu_has_divec) {
		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();
			set_c0_cause(CAUSEF_IV);
			evpe(vpflags);
		} else
			set_c0_cause(CAUSEF_IV);
	}
#ifdef CONFIG_MIPS_MT_SMTC
	}
#endif /* CONFIG_MIPS_MT_SMTC */

	cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
	TLBMISS_HANDLER_SETUP();

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

#ifdef CONFIG_MIPS_MT_SMTC
	if (bootTC) {
#endif /* CONFIG_MIPS_MT_SMTC */
		cpu_cache_init();
		tlb_init();
#ifdef CONFIG_MIPS_MT_SMTC
	}
#endif /* CONFIG_MIPS_MT_SMTC */
}
/* Install CPU exception handler */
void __init set_handler (unsigned long offset, void *addr, unsigned long size)
{
	memcpy((void *)(ebase + offset), addr, size);
	flush_icache_range(ebase + offset, ebase + offset + size);
}

/* Install uncached CPU exception handler */
void __init set_uncached_handler (unsigned long offset, void *addr, unsigned long size)
{
#ifdef CONFIG_32BIT
	unsigned long uncached_ebase = KSEG1ADDR(ebase);
#endif
#ifdef CONFIG_64BIT
	unsigned long uncached_ebase = TO_UNCAC(ebase);
#endif

	memcpy((void *)(uncached_ebase + offset), addr, size);
}
void __init trap_init(void)
{
	extern char except_vec3_generic, except_vec3_r4000;
	extern char except_vec4;
	unsigned long i;

	if (cpu_has_veic || cpu_has_vint)
		ebase = (unsigned long) alloc_bootmem_low_pages (0x200 + VECTORSPACING*64);
	else
		ebase = CAC_BASE;

	mips_srs_init();

	per_cpu_trap_init();

	/*
	 * Copy the generic exception handlers to their final destination.
	 * This will be overridden later as suitable for a particular
	 * configuration.
	 */
	set_handler(0x180, &except_vec3_generic, 0x80);

	/*
	 * Setup default vectors
	 */
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);

	/*
	 * Copy the EJTAG debug exception vector handler code to its final
	 * destination.
	 */
	if (cpu_has_ejtag && board_ejtag_handler_setup)
		board_ejtag_handler_setup ();

	/*
	 * Only some CPUs have the watch exceptions.
	 */
	if (cpu_has_watch)
		set_except_vector(23, handle_watch);

	/*
	 * Initialise interrupt handlers
	 */
	if (cpu_has_veic || cpu_has_vint) {
		int nvec = cpu_has_veic ? 64 : 8;
		for (i = 0; i < nvec; i++)
			set_vi_handler(i, NULL);
	}
	else if (cpu_has_divec)
		set_handler(0x200, &except_vec4, 0x8);

	/*
	 * Some CPUs can enable/disable for cache parity detection, but do
	 * it in different ways.
	 */
	parity_protection_init();

	/*
	 * The Data Bus Errors / Instruction Bus Errors are signaled
	 * by external hardware.  Therefore these two exceptions
	 * may have board specific handlers.
	 */
	if (board_be_init)
		board_be_init();
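
	/*
	 * The vector numbers below are CP0 Cause.ExcCode values: 0 Int,
	 * 1 TLB Mod, 2 TLBL, 3 TLBS, 4 AdEL, 5 AdES, 6 IBE, 7 DBE, 8 Sys,
	 * 9 Bp, 10 RI, 11 CpU, 12 Ov and 13 Tr.
	 */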
	set_except_vector(0, handle_int);
	set_except_vector(1, handle_tlbm);
	set_except_vector(2, handle_tlbl);
	set_except_vector(3, handle_tlbs);

	set_except_vector(4, handle_adel);
	set_except_vector(5, handle_ades);

	set_except_vector(6, handle_ibe);
	set_except_vector(7, handle_dbe);

	set_except_vector(8, handle_sys);
	set_except_vector(9, handle_bp);
	set_except_vector(10, handle_ri);
	set_except_vector(11, handle_cpu);
	set_except_vector(12, handle_ov);
	set_except_vector(13, handle_tr);

	if (current_cpu_data.cputype == CPU_R6000 ||
	    current_cpu_data.cputype == CPU_R6000A) {
		/*
		 * The R6000 is the only R-series CPU that features a machine
		 * check exception (similar to the R4000 cache error) and
		 * unaligned ldc1/sdc1 exception.  The handlers have not been
		 * written yet.  Well, anyway there is no R6000 machine on the
		 * current list of targets for Linux/MIPS.
		 * (Duh, crap, there is someone with a triple R6k machine)
		 */
		//set_except_vector(14, handle_mc);
		//set_except_vector(15, handle_ndc);
	}

	if (board_nmi_handler_setup)
		board_nmi_handler_setup();

	if (cpu_has_fpu && !cpu_has_nofpuex)
		set_except_vector(15, handle_fpe);

	set_except_vector(22, handle_mdmx);

	if (cpu_has_mcheck)
		set_except_vector(24, handle_mcheck);

	if (cpu_has_mipsmt)
		set_except_vector(25, handle_mt);

	if (cpu_has_dsp)
		set_except_vector(26, handle_dsp);

	if (cpu_has_vce)
		/* Special exception: R4[04]00 uses also the divec space. */
		memcpy((void *)(CAC_BASE + 0x180), &except_vec3_r4000, 0x100);
	else if (cpu_has_4kex)
		memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80);
	else
		memcpy((void *)(CAC_BASE + 0x080), &except_vec3_generic, 0x80);

	signal_init();
#ifdef CONFIG_MIPS32_COMPAT
	signal32_init();
#endif

	flush_icache_range(ebase, ebase + 0x400);
	flush_tlb_handlers();
}