/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
 * Copyright (C) 1995, 1996 Paul M. Antoine
 * Copyright (C) 1998 Ulf Carlsson
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/cpu_pm.h>
#include <linux/kexec.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/kdb.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cpu-type.h>
#include <asm/fpu_emulator.h>
#include <asm/mips-r2-to-r6-emul.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/watch.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
extern void check_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern u32 handle_tlbl[];
extern u32 handle_tlbs[];
extern u32 handle_tlbm[];
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_vivt(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_msa_fpe(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_ftlb(void);
extern asmlinkage void handle_msa(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
extern void tlb_do_page_fault_0(void);
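/*
 * Note added for clarity: the handle_* entry points above are
 * implemented in assembly (see genex.S) or generated at runtime by
 * uasm (the handle_tlb* arrays); only their addresses are used here,
 * when trap_init() wires up the exception vectors below.
 */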
void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void);
void (*board_cache_error_setup)(void);
static void show_raw_backtrace(unsigned long reg29)
{
	unsigned long *sp = (unsigned long *)(reg29 & ~3);
	unsigned long addr;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	while (!kstack_end(sp)) {
		unsigned long __user *p =
			(unsigned long __user *)(unsigned long)sp++;
		if (__get_user(addr, p)) {
			printk(" (Bad stack address)");
			break;
		}
		if (__kernel_text_address(addr))
			print_ip_sym(addr);
	}
	printk("\n");
}

#ifdef CONFIG_KALLSYMS
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
	raw_show_trace = 1;
	return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#endif
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
{
	unsigned long sp = regs->regs[29];
	unsigned long ra = regs->regs[31];
	unsigned long pc = regs->cp0_epc;

	if (!task)
		task = current;

	if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
		show_raw_backtrace(sp);
		return;
	}
	printk("Call Trace:\n");
	do {
		print_ip_sym(pc);
		pc = unwind_stack(task, &sp, pc, &ra);
	} while (pc);
	printk("\n");
}
/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */
static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	long stackdata;
	int i;
	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];

	printk("Stack :");
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0))
			printk("\n	 ");
		if (i > 39) {
			printk(" ...");
			break;
		}

		if (__get_user(stackdata, sp++)) {
			printk(" (Bad stack address)");
			break;
		}

		printk(" %0*lx", field, stackdata);
		i++;
	}
	printk("\n");
	show_backtrace(task, regs);
}
void show_stack(struct task_struct *task, unsigned long *sp)
{
	struct pt_regs regs;
	mm_segment_t old_fs = get_fs();
	if (sp) {
		regs.regs[29] = (unsigned long)sp;
		regs.regs[31] = 0;
		regs.cp0_epc = 0;
	} else {
		if (task && task != current) {
			regs.regs[29] = task->thread.reg29;
			regs.regs[31] = 0;
			regs.cp0_epc = task->thread.reg31;
#ifdef CONFIG_KGDB_KDB
		} else if (atomic_read(&kgdb_active) != -1 &&
			   kdb_current_regs) {
			memcpy(&regs, kdb_current_regs, sizeof(regs));
#endif /* CONFIG_KGDB_KDB */
		} else {
			prepare_frametrace(&regs);
		}
	}
	/*
	 * show_stack() deals exclusively with kernel mode, so be sure to access
	 * the stack in the kernel (not user) address space.
	 */
	set_fs(KERNEL_DS);
	show_stacktrace(task, &regs);
	set_fs(old_fs);
}
static void show_code(unsigned int __user *pc)
{
	long i;
	unsigned short __user *pc16 = NULL;

	printk("\nCode:");

	if ((unsigned long)pc & 1)
		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
	for(i = -3 ; i < 6 ; i++) {
		unsigned int insn;
		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
			printk(" (Bad address in epc)\n");
			break;
		}
		printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
	}
}
static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int cause = regs->cp0_cause;
	unsigned int exccode;
	int i;

	show_regs_print_info(KERN_DEFAULT);

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d   :", i);
		if (i == 0)
			printk(" %0*lx", field, 0UL);
		else if (i == 26 || i == 27)
			printk(" %*s", field, "");
		else
			printk(" %0*lx", field, regs->regs[i]);

		i++;
		if ((i % 4) == 0)
			printk("\n");
	}

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	printk("Acx    : %0*lx\n", field, regs->acx);
#endif
	printk("Hi    : %0*lx\n", field, regs->hi);
	printk("Lo    : %0*lx\n", field, regs->lo);

	/*
	 * Saved cp0 registers
	 */
	printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
	       (void *) regs->cp0_epc);
	printk("ra    : %0*lx %pS\n", field, regs->regs[31],
	       (void *) regs->regs[31]);

	printk("Status: %08x	", (uint32_t) regs->cp0_status);

	if (cpu_has_3kex) {
		if (regs->cp0_status & ST0_KUO)
			printk("KUo ");
		if (regs->cp0_status & ST0_IEO)
			printk("IEo ");
		if (regs->cp0_status & ST0_KUP)
			printk("KUp ");
		if (regs->cp0_status & ST0_IEP)
			printk("IEp ");
		if (regs->cp0_status & ST0_KUC)
			printk("KUc ");
		if (regs->cp0_status & ST0_IEC)
			printk("IEc ");
	} else if (cpu_has_4kex) {
		if (regs->cp0_status & ST0_KX)
			printk("KX ");
		if (regs->cp0_status & ST0_SX)
			printk("SX ");
		if (regs->cp0_status & ST0_UX)
			printk("UX ");
		switch (regs->cp0_status & ST0_KSU) {
		case KSU_USER:
			printk("USER ");
			break;
		case KSU_SUPERVISOR:
			printk("SUPERVISOR ");
			break;
		case KSU_KERNEL:
			printk("KERNEL ");
			break;
		default:
			printk("BAD_MODE ");
			break;
		}
		if (regs->cp0_status & ST0_ERL)
			printk("ERL ");
		if (regs->cp0_status & ST0_EXL)
			printk("EXL ");
		if (regs->cp0_status & ST0_IE)
			printk("IE ");
	}
	printk("\n");

	exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);

	if (1 <= exccode && exccode <= 5)
		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

	printk("PrId  : %08x (%s)\n", read_c0_prid(),
	       cpu_name_string());
}
/*
 * FIXME: really the generic show_regs should take a const pointer argument.
 */
void show_regs(struct pt_regs *regs)
{
	__show_regs((struct pt_regs *)regs);
}
void show_registers(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	mm_segment_t old_fs = get_fs();

	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
	       current->comm, current->pid, current_thread_info(), current,
	       field, current_thread_info()->tp_value);
	if (cpu_has_userlocal) {
		unsigned long tls;

		tls = read_c0_userlocal();
		if (tls != current_thread_info()->tp_value)
			printk("*HwTLS: %0*lx\n", field, tls);
	}

	if (!user_mode(regs))
		/* Necessary for getting the correct stack content */
		set_fs(KERNEL_DS);
	show_stacktrace(current, regs);
	show_code((unsigned int __user *) regs->cp0_epc);
	printk("\n");
	set_fs(old_fs);
}
static DEFINE_RAW_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs)
{
	static int die_counter;
	int sig = SIGSEGV;

	oops_enter();

	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
		       SIGSEGV) == NOTIFY_STOP)
		sig = 0;

	console_verbose();
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	do_exit(sig);
}
extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];

__asm__(
"	.section	__dbe_table, \"a\"\n"
"	.previous			\n");

/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
	if (!e)
		e = search_module_dbetables(addr);
	return e;
}
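/*
 * General description, added for clarity: entries in __dbe_table pair
 * the address of an instruction that may legitimately take a bus error
 * (e.g. a probe of possibly-absent hardware) with a fixup address to
 * resume at, in the same way the regular exception tables handle
 * faulting user accesses.
 */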
asmlinkage void do_be(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	const struct exception_table_entry *fixup = NULL;
	int data = regs->cp0_cause & 4;
	int action = MIPS_BE_FATAL;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	/* XXX For now.	 Fixme, this searches the wrong table ...  */
	if (data && !user_mode(regs))
		fixup = search_dbe_tables(exception_epc(regs));

	if (fixup)
		action = MIPS_BE_FIXUP;

	if (board_be_handler)
		action = board_be_handler(regs, fixup != NULL);

	switch (action) {
	case MIPS_BE_DISCARD:
		goto out;
	case MIPS_BE_FIXUP:
		if (fixup) {
			regs->cp0_epc = fixup->nextinsn;
			goto out;
		}
		break;
	default:
		break;
	}

	/*
	 * Assume it would be too dangerous to continue ...
	 */
	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
	       data ? "Data" : "Instruction",
	       field, regs->cp0_epc, field, regs->regs[31]);
	if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
		       SIGBUS) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Oops", regs);
	force_sig(SIGBUS, current);

out:
	exception_exit(prev_state);
}
/*
 * ll/sc, rdhwr, sync emulation
 */

#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b

/*  microMIPS definitions */
#define MM_POOL32A_FUNC 0xfc00ffff
#define MM_RDHWR        0x00006b3c
#define MM_RS           0x001f0000
#define MM_RT           0x03e00000
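/*
 * Illustrative example (note added here, not from the original file):
 * for "ll t0, 8(a0)", encoded as 0xc0880008, the masks above decode
 * the instruction fields as:
 *
 *	opcode & OPCODE		= 0xc0000000	-> LL
 *	(opcode & BASE) >> 21	= 4		-> base register a0 ($4)
 *	(opcode & RT) >> 16	= 8		-> target register t0 ($8)
 *	opcode & OFFSET		= 8		-> 16-bit signed displacement
 */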
/*
 * The ll_bit is cleared by r*_switch.S
 */

unsigned int ll_bit;
struct task_struct *ll_task;

static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long value, __user *vaddr;
	long offset;

	/*
	 * analyse the ll instruction that just caused a ri exception
	 * and put the referenced address to addr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

	if ((unsigned long)vaddr & 3)
		return SIGBUS;
	if (get_user(value, vaddr))
		return SIGSEGV;

	preempt_disable();

	if (ll_task == NULL || ll_task == current) {
		ll_bit = 1;
	} else {
		ll_bit = 0;
	}
	ll_task = current;

	preempt_enable();

	regs->regs[(opcode & RT) >> 16] = value;

	return 0;
}
static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long __user *vaddr;
	unsigned long reg;
	long offset;

	/*
	 * analyse the sc instruction that just caused a ri exception
	 * and put the referenced address to addr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
	reg = (opcode & RT) >> 16;

	if ((unsigned long)vaddr & 3)
		return SIGBUS;

	preempt_disable();

	if (ll_bit == 0 || ll_task != current) {
		regs->regs[reg] = 0;
		preempt_enable();
		return 0;
	}

	preempt_enable();

	if (put_user(regs->regs[reg], vaddr))
		return SIGSEGV;

	regs->regs[reg] = 1;

	return 0;
}
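/*
 * For reference, the kind of user-level sequence these two helpers
 * emulate on ll/sc-less CPUs (illustrative assembly, added here):
 *
 *	1:	ll	t0, 0(a0)	# load linked
 *		addiu	t0, t0, 1
 *		sc	t0, 0(a0)	# store conditional
 *		beqz	t0, 1b		# retry if the link was broken
 *
 * simulate_ll() sets ll_bit and records ll_task; simulate_sc() only
 * performs the store if ll_bit is still set for the current task,
 * mirroring the architectural link-breaking rules closely enough for
 * uniprocessor use.
 */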
/*
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is both
 * opcodes are supposed to result in coprocessor unusable exceptions if
 * executed on ll/sc-less processors.  That's the theory.  In practice a
 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 * instead, so we're doing the emulation thing in both exception handlers.
 */
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == LL) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return simulate_ll(regs, opcode);
	}
	if ((opcode & OPCODE) == SC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return simulate_sc(regs, opcode);
	}

	return -1;			/* Must be something else ... */
}
/*
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware.
 */
static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
{
	struct thread_info *ti = task_thread_info(current);

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
			1, regs, 0);
	switch (rd) {
	case 0:		/* CPU number */
		regs->regs[rt] = smp_processor_id();
		return 0;
	case 1:		/* SYNCI length */
		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
				     current_cpu_data.icache.linesz);
		return 0;
	case 2:		/* Read count register */
		regs->regs[rt] = read_c0_count();
		return 0;
	case 3:		/* Count register resolution */
		switch (current_cpu_type()) {
		case CPU_20KC:
		case CPU_25KF:
			regs->regs[rt] = 1;
			break;
		default:
			regs->regs[rt] = 2;
		}
		return 0;
	case 29:	/* UserLocal (TLS pointer) */
		regs->regs[rt] = ti->tp_value;
		return 0;
	default:
		return -1;
	}
}
static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
		int rd = (opcode & RD) >> 11;
		int rt = (opcode & RT) >> 16;

		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	/* Not ours.  */
	return -1;
}

static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
		int rd = (opcode & MM_RS) >> 16;
		int rt = (opcode & MM_RT) >> 21;
		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	/* Not ours.  */
	return -1;
}
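/*
 * Example of the trap being simulated here (descriptive note added):
 * userland TLS access via "rdhwr v1, $29", as emitted for __thread
 * variables.  On cores without the UserLocal hardware register this
 * faults as a reserved instruction and simulate_rdhwr() supplies
 * thread_info->tp_value instead.
 */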
static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return 0;
	}

	return -1;			/* Must be something else ... */
}
asmlinkage void do_ov(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	siginfo_t info = {
		.si_signo = SIGFPE,
		.si_code = FPE_INTOVF,
		.si_addr = (void __user *)regs->cp0_epc,
	};

	prev_state = exception_enter();
	die_if_kernel("Integer overflow", regs);

	force_sig_info(SIGFPE, &info, current);
	exception_exit(prev_state);
}
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
	struct siginfo si = { 0 };

	switch (sig) {
	case 0:
		return 0;

	case SIGFPE:
		si.si_addr = fault_addr;
		si.si_signo = sig;
		/*
		 * Inexact can happen together with Overflow or Underflow.
		 * Respect the mask to deliver the correct exception.
		 */
		fcr31 &= (fcr31 & FPU_CSR_ALL_E) <<
			 (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E));
		if (fcr31 & FPU_CSR_INV_X)
			si.si_code = FPE_FLTINV;
		else if (fcr31 & FPU_CSR_DIV_X)
			si.si_code = FPE_FLTDIV;
		else if (fcr31 & FPU_CSR_OVF_X)
			si.si_code = FPE_FLTOVF;
		else if (fcr31 & FPU_CSR_UDF_X)
			si.si_code = FPE_FLTUND;
		else if (fcr31 & FPU_CSR_INE_X)
			si.si_code = FPE_FLTRES;
		else
			si.si_code = __SI_FAULT;
		force_sig_info(sig, &si, current);
		return 1;

	case SIGBUS:
		si.si_addr = fault_addr;
		si.si_signo = sig;
		si.si_code = BUS_ADRERR;
		force_sig_info(sig, &si, current);
		return 1;

	case SIGSEGV:
		si.si_addr = fault_addr;
		si.si_signo = sig;
		down_read(&current->mm->mmap_sem);
		if (find_vma(current->mm, (unsigned long)fault_addr))
			si.si_code = SEGV_ACCERR;
		else
			si.si_code = SEGV_MAPERR;
		up_read(&current->mm->mmap_sem);
		force_sig_info(sig, &si, current);
		return 1;

	default:
		force_sig(sig, current);
		return 1;
	}
}
static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
		       unsigned long old_epc, unsigned long old_ra)
{
	union mips_instruction inst = { .word = opcode };
	void __user *fault_addr;
	unsigned long fcr31;
	int sig;

	/* If it's obviously not an FP instruction, skip it */
	switch (inst.i_format.opcode) {
	case cop1_op:
	case cop1x_op:
	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
		break;

	default:
		return -1;
	}

	/*
	 * do_ri skipped over the instruction via compute_return_epc, undo
	 * that for the FPU emulator.
	 */
	regs->cp0_epc = old_epc;
	regs->regs[31] = old_ra;

	/* Save the FP context to struct thread_struct */
	lose_fpu(1);

	/* Run the emulator */
	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
				       &fault_addr);
	fcr31 = current->thread.fpu.fcr31;

	/*
	 * We can't allow the emulated instruction to leave any of
	 * the cause bits set in $fcr31.
	 */
	current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

	/* Restore the hardware register state */
	own_fpu(1);

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcr31);

	return 0;
}
/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
	enum ctx_state prev_state;
	void __user *fault_addr;
	int sig;

	prev_state = exception_enter();
	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
		       SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear FCSR.Cause before enabling interrupts */
	write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X);
	local_irq_enable();

	die_if_kernel("FP exception in kernel code", regs);

	if (fcr31 & FPU_CSR_UNI_X) {
		/*
		 * Unimplemented operation exception.  If we've got the full
		 * software emulator on-board, let's use it...
		 *
		 * Force FPU to dump state into task/thread context.  We're
		 * moving a lot of data here for what is probably a single
		 * instruction, but the alternative is to pre-decode the FP
		 * register operands before invoking the emulator, which seems
		 * a bit extreme for what should be an infrequent event.
		 */
		/* Ensure 'resume' not overwrite saved fp context again. */
		lose_fpu(1);

		/* Run the emulator */
		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);
		fcr31 = current->thread.fpu.fcr31;

		/*
		 * We can't allow the emulated instruction to leave any of
		 * the cause bits set in $fcr31.
		 */
		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

		/* Restore the hardware register state */
		own_fpu(1);	/* Using the FPU again.	 */
	} else {
		sig = SIGFPE;
		fault_addr = (void __user *) regs->cp0_epc;
	}

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcr31);

out:
	exception_exit(prev_state);
}
void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
	const char *str)
{
	siginfo_t info = { 0 };
	char b[40];

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
			 SIGTRAP) == NOTIFY_STOP)
		return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

	if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
		       SIGTRAP) == NOTIFY_STOP)
		return;

	/*
	 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
	 * insns, even for trap and break codes that indicate arithmetic
	 * failures.  Weird ...
	 * But should we continue the brokenness???  --macro
	 */
	switch (code) {
	case BRK_OVERFLOW:
	case BRK_DIVZERO:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (code == BRK_DIVZERO)
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_addr = (void __user *) regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	case BRK_BUG:
		die_if_kernel("Kernel bug detected", regs);
		force_sig(SIGTRAP, current);
		break;
	case BRK_MEMU:
		/*
		 * This breakpoint code is used by the FPU emulator to retake
		 * control of the CPU after executing the instruction from the
		 * delay slot of an emulated branch.
		 *
		 * Terminate if exception was recognized as a delay slot return
		 * otherwise handle as normal.
		 */
		if (do_dsemulret(regs))
			return;

		die_if_kernel("Math emu break/trap", regs);
		force_sig(SIGTRAP, current);
		break;
	default:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (si_code) {
			info.si_signo = SIGTRAP;
			info.si_code = si_code;
			force_sig_info(SIGTRAP, &info, current);
		} else {
			force_sig(SIGTRAP, current);
		}
	}
}
asmlinkage void do_bp(struct pt_regs *regs)
{
	unsigned long epc = msk_isa16_mode(exception_epc(regs));
	unsigned int opcode, bcode;
	enum ctx_state prev_state;
	mm_segment_t seg;

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		u16 instr[2];

		if (__get_user(instr[0], (u16 __user *)epc))
			goto out_sigsegv;

		if (!cpu_has_mmips) {
			/* MIPS16e mode */
			bcode = (instr[0] >> 5) & 0x3f;
		} else if (mm_insn_16bit(instr[0])) {
			/* 16-bit microMIPS BREAK */
			bcode = instr[0] & 0xf;
		} else {
			/* 32-bit microMIPS BREAK */
			if (__get_user(instr[1], (u16 __user *)(epc + 2)))
				goto out_sigsegv;
			opcode = (instr[0] << 16) | instr[1];
			bcode = (opcode >> 6) & ((1 << 20) - 1);
		}
	} else {
		if (__get_user(opcode, (unsigned int __user *)epc))
			goto out_sigsegv;
		bcode = (opcode >> 6) & ((1 << 20) - 1);
	}

	/*
	 * There is an ancient bug in MIPS assemblers: the break code is
	 * placed starting at bit 16 instead of bit 6 in the opcode.
	 * Gas is bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristic.  --macro
	 */
	if (bcode >= (1 << 10))
		bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);

	/*
	 * notify the kprobe handlers, if instruction is likely to
	 * pertain to them.
	 */
	switch (bcode) {
	case BRK_UPROBE:
		if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_UPROBE_XOL:
		if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_BP:
		if (notify_die(DIE_BREAK, "debug", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	default:
		break;
	}

	do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");

out:
	set_fs(seg);
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
	goto out;
}
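/*
 * For reference (illustrative note added here): "break 7" assembles to
 * 0x000001cd, i.e. the 20-bit code field in bits 25..6 holds 7 and the
 * SPECIAL function field holds 0x0d.  The heuristic above additionally
 * accepts codes that buggy assemblers placed starting at bit 16.
 */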
asmlinkage void do_tr(struct pt_regs *regs)
{
	u32 opcode, tcode = 0;
	enum ctx_state prev_state;
	u16 instr[2];
	mm_segment_t seg;
	unsigned long epc = msk_isa16_mode(exception_epc(regs));

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
		    __get_user(instr[1], (u16 __user *)(epc + 2)))
			goto out_sigsegv;
		opcode = (instr[0] << 16) | instr[1];
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 12) & ((1 << 4) - 1);
	} else {
		if (__get_user(opcode, (u32 __user *)epc))
			goto out_sigsegv;
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 6) & ((1 << 10) - 1);
	}

	do_trap_or_bp(regs, tcode, 0, "Trap");

out:
	set_fs(seg);
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
	goto out;
}
asmlinkage void do_ri(struct pt_regs *regs)
{
	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
	unsigned long old_epc = regs->cp0_epc;
	unsigned long old31 = regs->regs[31];
	enum ctx_state prev_state;
	unsigned int opcode = 0;
	int status = -1;

	/*
	 * Avoid any kernel code.  Just emulate the R2 instruction
	 * as quickly as possible.
	 */
	if (mipsr2_emulation && cpu_has_mips_r6 &&
	    likely(user_mode(regs)) &&
	    likely(get_user(opcode, epc) >= 0)) {
		unsigned long fcr31 = 0;

		status = mipsr2_decoder(regs, opcode, &fcr31);
		switch (status) {
		case 0:
		case SIGEMT:
			task_thread_info(current)->r2_emul_return = 1;
			return;
		case SIGILL:
			goto no_r2_instr;
		default:
			process_fpemu_return(status,
					     &current->thread.cp0_baduaddr,
					     fcr31);
			task_thread_info(current)->r2_emul_return = 1;
			return;
		}
	}

no_r2_instr:

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;

	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
		       SIGILL) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(compute_return_epc(regs) < 0))
		goto out;

	if (!get_isa16_mode(regs->cp0_epc)) {
		if (unlikely(get_user(opcode, epc) < 0))
			status = SIGSEGV;

		if (!cpu_has_llsc && status < 0)
			status = simulate_llsc(regs, opcode);

		if (status < 0)
			status = simulate_rdhwr_normal(regs, opcode);

		if (status < 0)
			status = simulate_sync(regs, opcode);

		if (status < 0)
			status = simulate_fp(regs, opcode, old_epc, old31);
	} else if (cpu_has_mmips) {
		unsigned short mmop[2] = { 0 };

		if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
			status = SIGSEGV;
		if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
			status = SIGSEGV;
		opcode = mmop[0];
		opcode = (opcode << 16) | mmop[1];

		if (status < 0)
			status = simulate_rdhwr_mm(regs, opcode);
	}

	if (status < 0)
		status = SIGILL;

	if (unlikely(status > 0)) {
		regs->cp0_epc = old_epc;		/* Undo skip-over.  */
		regs->regs[31] = old31;
		force_sig(status, current);
	}

out:
	exception_exit(prev_state);
}
/*
 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
 * emulated more than some threshold number of instructions, force migration to
 * a "CPU" that has FP support.
 */
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
	if (mt_fpemul_threshold > 0 &&
	     ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
		/*
		 * If there's no FPU present, or if the application has already
		 * restricted the allowed set to exclude any CPUs with FPUs,
		 * we'll skip the procedure.
		 */
		if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
			cpumask_t tmask;

			current->thread.user_cpus_allowed
				= current->cpus_allowed;
			cpumask_and(&tmask, &current->cpus_allowed,
				    &mt_fpu_cpumask);
			set_cpus_allowed_ptr(current, &tmask);
			set_thread_flag(TIF_FPUBOUND);
		}
	}
#endif /* CONFIG_MIPS_MT_FPAFF */
}
/*
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(cu2_chain);

int __ref register_cu2_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cu2_chain, nb);
}

int cu2_notifier_call_chain(unsigned long val, void *v)
{
	return raw_notifier_call_chain(&cu2_chain, val, v);
}

static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
	void *data)
{
	struct pt_regs *regs = data;

	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
			      "instruction", regs);
	force_sig(SIGILL, current);

	return NOTIFY_OK;
}
static int wait_on_fp_mode_switch(atomic_t *p)
{
	/*
	 * The FP mode for this task is currently being switched. That may
	 * involve modifications to the format of this task's FP context which
	 * make it unsafe to proceed with execution for the moment. Instead,
	 * schedule some other task.
	 */
	schedule();
	return 0;
}

static int enable_restore_fp_context(int msa)
{
	int err, was_fpu_owner, prior_msa;

	/*
	 * If an FP mode switch is currently underway, wait for it to
	 * complete before proceeding.
	 */
	wait_on_atomic_t(&current->mm->context.fp_mode_switching,
			 wait_on_fp_mode_switch, TASK_KILLABLE);

	if (!used_math()) {
		/* First time FP context user. */
		preempt_disable();
		err = init_fpu();
		if (msa && !err) {
			enable_msa();
			_init_msa_upper();
			set_thread_flag(TIF_USEDMSA);
			set_thread_flag(TIF_MSA_CTX_LIVE);
		}
		preempt_enable();
		if (!err)
			set_used_math();
		return err;
	}

	/*
	 * This task has formerly used the FP context.
	 *
	 * If this thread has no live MSA vector context then we can simply
	 * restore the scalar FP context. If it has live MSA vector context
	 * (that is, it has or may have used MSA since last performing a
	 * function call) then we'll need to restore the vector context. This
	 * applies even if we're currently only executing a scalar FP
	 * instruction. This is because if we were to later execute an MSA
	 * instruction then we'd either have to:
	 *
	 *  - Restore the vector context & clobber any registers modified by
	 *    scalar FP instructions between now & then.
	 *
	 * or
	 *
	 *  - Not restore the vector context & lose the most significant bits
	 *    of all vector registers.
	 *
	 * Neither of those options is acceptable. We cannot restore the least
	 * significant bits of the registers now & only restore the most
	 * significant bits later because the most significant bits of any
	 * vector registers whose aliased FP register is modified now will have
	 * been zeroed. We'd have no way to know that when restoring the vector
	 * context & thus may load an outdated value for the most significant
	 * bits of a vector register.
	 */
	if (!msa && !thread_msa_context_live())
		return own_fpu(1);

	/*
	 * This task is using or has previously used MSA. Thus we require
	 * that Status.FR == 1.
	 */
	preempt_disable();
	was_fpu_owner = is_fpu_owner();
	err = own_fpu_inatomic(0);
	if (err)
		goto out;

	enable_msa();
	write_msa_csr(current->thread.fpu.msacsr);
	set_thread_flag(TIF_USEDMSA);

	/*
	 * If this is the first time that the task is using MSA and it has
	 * previously used scalar FP in this time slice then we already have
	 * FP context which we shouldn't clobber. We do however need to clear
	 * the upper 64b of each vector register so that this task has no
	 * opportunity to see data left behind by another.
	 */
	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
	if (!prior_msa && was_fpu_owner) {
		_init_msa_upper();

		goto out;
	}

	if (!prior_msa) {
		/*
		 * Restore the least significant 64b of each vector register
		 * from the existing scalar FP context.
		 */
		_restore_fp(current);

		/*
		 * The task has not formerly used MSA, so clear the upper 64b
		 * of each vector register such that it cannot see data left
		 * behind by another task.
		 */
		_init_msa_upper();
	} else {
		/* We need to restore the vector context. */
		restore_msa(current);

		/* Restore the scalar FP control & status register */
		if (!was_fpu_owner)
			write_32bit_cp1_register(CP1_STATUS,
						 current->thread.fpu.fcr31);
	}

out:
	preempt_enable();

	return err;
}
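/*
 * Summary of the cases handled above (descriptive note added for
 * clarity):
 *
 *	!used_math()			-> initialise fresh FP (+MSA) state
 *	FP only, no live MSA context	-> own_fpu(1) restores scalar FP
 *	first MSA use, still FPU owner	-> zero vector upper 64b in place
 *	first MSA use, not FPU owner	-> restore FP, zero vector upper 64b
 *	prior live MSA context		-> restore the full vector context
 */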
asmlinkage void do_cpu(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	unsigned int __user *epc;
	unsigned long old_epc, old31;
	void __user *fault_addr;
	unsigned int opcode;
	unsigned long fcr31;
	unsigned int cpid;
	int status, err;
	int sig;

	prev_state = exception_enter();
	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

	if (cpid != 2)
		die_if_kernel("do_cpu invoked from kernel context!", regs);

	switch (cpid) {
	case 0:
		epc = (unsigned int __user *)exception_epc(regs);
		old_epc = regs->cp0_epc;
		old31 = regs->regs[31];
		opcode = 0;
		status = -1;

		if (unlikely(compute_return_epc(regs) < 0))
			break;

		if (!get_isa16_mode(regs->cp0_epc)) {
			if (unlikely(get_user(opcode, epc) < 0))
				status = SIGSEGV;

			if (!cpu_has_llsc && status < 0)
				status = simulate_llsc(regs, opcode);
		}

		if (status < 0)
			status = SIGILL;

		if (unlikely(status > 0)) {
			regs->cp0_epc = old_epc;	/* Undo skip-over.  */
			regs->regs[31] = old31;
			force_sig(status, current);
		}

		break;

	case 3:
		/*
		 * The COP3 opcode space and consequently the CP0.Status.CU3
		 * bit and the CP0.Cause.CE=3 encoding have been removed as
		 * of the MIPS III ISA.	 From the MIPS IV and MIPS32r2 ISAs
		 * up the space has been reused for COP1X instructions, that
		 * are enabled by the CP0.Status.CU1 bit and consequently
		 * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
		 * exceptions.	Some FPU-less processors that implement one
		 * of these ISAs however use this code erroneously for COP1X
		 * instructions.  Therefore we redirect this trap to the FP
		 * emulator too.
		 */
		if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
			force_sig(SIGILL, current);
			break;
		}
		/* Fall through.  */

	case 1:
		err = enable_restore_fp_context(0);

		if (raw_cpu_has_fpu && !err)
			break;

		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
					       &fault_addr);
		fcr31 = current->thread.fpu.fcr31;

		/*
		 * We can't allow the emulated instruction to leave
		 * any of the cause bits set in $fcr31.
		 */
		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

		/* Send a signal if required.  */
		if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
			mt_ase_fp_affinity();

		break;

	case 2:
		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
		break;
	}

	exception_exit(prev_state);
}
void do_msa_fpe(struct pt_regs
*regs
, unsigned int msacsr
)
1445 enum ctx_state prev_state
;
1447 prev_state
= exception_enter();
1448 current
->thread
.trap_nr
= (regs
->cp0_cause
>> 2) & 0x1f;
1449 if (notify_die(DIE_MSAFP
, "MSA FP exception", regs
, 0,
1450 current
->thread
.trap_nr
, SIGFPE
) == NOTIFY_STOP
)
1453 /* Clear MSACSR.Cause before enabling interrupts */
1454 write_msa_csr(msacsr
& ~MSA_CSR_CAUSEF
);
1457 die_if_kernel("do_msa_fpe invoked from kernel context!", regs
);
1458 force_sig(SIGFPE
, current
);
1460 exception_exit(prev_state
);
asmlinkage void do_msa(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	int err;

	prev_state = exception_enter();

	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
		force_sig(SIGILL, current);
		goto out;
	}

	die_if_kernel("do_msa invoked from kernel context!", regs);

	err = enable_restore_fp_context(1);
	if (err)
		force_sig(SIGILL, current);
out:
	exception_exit(prev_state);
}

asmlinkage void do_mdmx(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	force_sig(SIGILL, current);
	exception_exit(prev_state);
}
/*
 * Called with interrupts disabled.
 */
asmlinkage void do_watch(struct pt_regs *regs)
{
	siginfo_t info = { .si_signo = SIGTRAP, .si_code = TRAP_HWBKPT };
	enum ctx_state prev_state;

	prev_state = exception_enter();
	/*
	 * Clear WP (bit 22) bit of cause register so we don't loop
	 * forever.
	 */
	clear_c0_cause(CAUSEF_WP);

	/*
	 * If the current thread has the watch registers loaded, save
	 * their values and send SIGTRAP.  Otherwise another thread
	 * left the registers set, clear them and continue.
	 */
	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
		mips_read_watch_registers();
		local_irq_enable();
		force_sig_info(SIGTRAP, &info, current);
	} else {
		mips_clear_watch_registers();
		local_irq_enable();
	}
	exception_exit(prev_state);
}
asmlinkage void do_mcheck(struct pt_regs *regs)
{
	int multi_match = regs->cp0_status & ST0_TS;
	enum ctx_state prev_state;
	mm_segment_t old_fs = get_fs();

	prev_state = exception_enter();
	show_regs(regs);

	if (multi_match) {
		dump_tlb_regs();
		pr_info("\n");
		dump_tlb_all();
	}

	if (!user_mode(regs))
		set_fs(KERNEL_DS);

	show_code((unsigned int __user *) regs->cp0_epc);
	set_fs(old_fs);

	/*
	 * Some chips may have other causes of machine check (e.g. SB1
	 * graduation timer)
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (multi_match) ? "" : "not ");
}
asmlinkage void do_mt(struct pt_regs *regs)
{
	int subcode;

	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
			>> VPECONTROL_EXCPT_SHIFT;
	switch (subcode) {
	case 0:
		printk(KERN_DEBUG "Thread Underflow\n");
		break;
	case 1:
		printk(KERN_DEBUG "Thread Overflow\n");
		break;
	case 2:
		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
		break;
	case 3:
		printk(KERN_DEBUG "Gating Storage Exception\n");
		break;
	case 4:
		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
		break;
	case 5:
		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
		break;
	default:
		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
			subcode);
	}
	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	force_sig(SIGILL, current);
}

asmlinkage void do_dsp(struct pt_regs *regs)
{
	if (cpu_has_dsp)
		panic("Unexpected DSP exception");

	force_sig(SIGILL, current);
}
asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - no way to handle this if it ever occurs.	 Most probably
	 * caused by a new unknown cpu type or after another deadly
	 * hard/software error.
	 */
	show_regs(regs);
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
}
static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
	l1parity = 0;
	return 1;
}
__setup("nol1par", nol1parity);
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
	l2parity = 0;
	return 1;
}
__setup("nol2par", nol2parity);
/*
 * Some MIPS CPUs can enable/disable cache parity detection, but they
 * do it in different ways.
 */
static inline void parity_protection_init(void)
{
	switch (current_cpu_type()) {
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
	case CPU_P5600:
	case CPU_QEMU_GENERIC:
		{
#define ERRCTL_PE	0x80000000
#define ERRCTL_L2P	0x00800000
			unsigned long errctl;
			unsigned int l1parity_present, l2parity_present;

			errctl = read_c0_ecc();
			errctl &= ~(ERRCTL_PE|ERRCTL_L2P);

			/* probe L1 parity support */
			write_c0_ecc(errctl | ERRCTL_PE);
			back_to_back_c0_hazard();
			l1parity_present = (read_c0_ecc() & ERRCTL_PE);

			/* probe L2 parity support */
			write_c0_ecc(errctl|ERRCTL_L2P);
			back_to_back_c0_hazard();
			l2parity_present = (read_c0_ecc() & ERRCTL_L2P);

			if (l1parity_present && l2parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
				if (l1parity ^ l2parity)
					errctl |= ERRCTL_L2P;
			} else if (l1parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
			} else if (l2parity_present) {
				if (l2parity)
					errctl |= ERRCTL_L2P;
			} else {
				/* No parity available */
			}

			printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);

			write_c0_ecc(errctl);
			back_to_back_c0_hazard();
			errctl = read_c0_ecc();
			printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);

			if (l1parity_present)
				printk(KERN_INFO "Cache parity protection %sabled\n",
				       (errctl & ERRCTL_PE) ? "en" : "dis");

			if (l2parity_present) {
				if (l1parity_present && l1parity)
					errctl ^= ERRCTL_L2P;
				printk(KERN_INFO "L2 cache parity protection %sabled\n",
				       (errctl & ERRCTL_L2P) ? "en" : "dis");
			}
		}
		break;

	case CPU_5KC:
	case CPU_5KE:
	case CPU_LOONGSON1:
		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}
asmlinkage void cache_parity_error(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	if ((cpu_has_mips_r2_r6) &&
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<27) ? "ES " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	} else {
		pr_err("Error bits: %s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	}
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
}
asmlinkage void do_ftlb(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	if ((cpu_has_mips_r2_r6) &&
	    (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
		       read_c0_ecc());
		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
		reg_val = read_c0_cacheerr();
		pr_err("c0_cacheerr == %08x\n", reg_val);

		if ((reg_val & 0xc0000000) == 0xc0000000) {
			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
		} else {
			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
			       reg_val & (1<<30) ? "secondary" : "primary",
			       reg_val & (1<<31) ? "data" : "insn");
		}
	} else {
		pr_err("FTLB error exception\n");
	}
	/* Just print the cacheerr bits for now */
	cache_parity_error();
}
/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long depc, old_epc, old_ra;
	unsigned int debug;

	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC). EPC is restored after the
		 * calculation.
		 */
		old_epc = regs->cp0_epc;
		old_ra = regs->regs[31];
		regs->cp0_epc = depc;
		compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
		regs->regs[31] = old_ra;
	} else
		depc += 4;
	write_c0_depc(depc);

#if 0
	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
#endif
}
/*
 * NMI exception handler.
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(nmi_chain);

int register_nmi_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&nmi_chain, nb);
}

void __noreturn nmi_exception_handler(struct pt_regs *regs)
{
	char str[100];

	nmi_enter();
	raw_notifier_call_chain(&nmi_chain, 0, regs);
	bust_spinlocks(1);
	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
		 smp_processor_id(), regs->cp0_epc);
	regs->cp0_epc = read_c0_errorepc();
	die(str, regs);
	nmi_exit();
}
#define VECTORSPACING 0x100	/* for EI/VI mode */

unsigned long ebase;
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];
void __init *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler;

#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * Only the TLB handlers are cache aligned with an even
	 * address. All other handlers are on an odd address and
	 * require no modification. Otherwise, MIPS32 mode will
	 * be entered when handling any TLB exceptions. That
	 * would be bad...since we must stay in microMIPS mode.
	 */
	if (!(handler & 0x1))
		handler |= 1;
#endif
	old_handler = xchg(&exception_handlers[n], handler);

	if (n == 0 && cpu_has_divec) {
#ifdef CONFIG_CPU_MICROMIPS
		unsigned long jump_mask = ~((1 << 27) - 1);
#else
		unsigned long jump_mask = ~((1 << 28) - 1);
#endif
		u32 *buf = (u32 *)(ebase + 0x200);
		unsigned int k0 = 26;
		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
			uasm_i_j(&buf, handler & ~jump_mask);
			uasm_i_nop(&buf);
		} else {
			UASM_i_LA(&buf, k0, handler);
			uasm_i_jr(&buf, k0);
			uasm_i_nop(&buf);
		}
		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
	}
	return (void *)old_handler;
}
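/*
 * Typical use (illustrative note added here): trap_init() below
 * installs handlers as
 *
 *	set_except_vector(EXCCODE_OV, handle_ov);
 *
 * and the previous handler is returned so that a caller could chain
 * to it or restore it later.
 */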
static void do_default_vi(void)
{
	show_regs(get_irq_regs());
	panic("Caught unexpected vectored interrupt.");
}
static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
	unsigned long handler;
	unsigned long old_handler = vi_handlers[n];
	int srssets = current_cpu_data.srsets;
	u16 *h;
	unsigned char *b;

	BUG_ON(!cpu_has_veic && !cpu_has_vint);

	if (addr == NULL) {
		handler = (unsigned long) do_default_vi;
		srs = 0;
	} else
		handler = (unsigned long) addr;
	vi_handlers[n] = handler;

	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

	if (srs >= srssets)
		panic("Shadow register set %d not supported", srs);

	if (cpu_has_veic) {
		if (board_bind_eic_interrupt)
			board_bind_eic_interrupt(n, srs);
	} else if (cpu_has_vint) {
		/* SRSMap is only defined if shadow sets are implemented */
		if (srssets > 1)
			change_c0_srsmap(0xf << n*4, srs << n*4);
	}

	if (srs == 0) {
		/*
		 * If no shadow set is selected then use the default handler
		 * that does normal register saving and standard interrupt exit
		 */
		extern char except_vec_vi, except_vec_vi_lui;
		extern char except_vec_vi_ori, except_vec_vi_end;
		extern char rollback_except_vec_vi;
		char *vec_start = using_rollback_handler() ?
			&rollback_except_vec_vi : &except_vec_vi;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
		const int lui_offset = &except_vec_vi_lui - vec_start + 2;
		const int ori_offset = &except_vec_vi_ori - vec_start + 2;
#else
		const int lui_offset = &except_vec_vi_lui - vec_start;
		const int ori_offset = &except_vec_vi_ori - vec_start;
#endif
		const int handler_len = &except_vec_vi_end - vec_start;

		if (handler_len > VECTORSPACING) {
			/*
			 * Sigh... panicing won't help as the console
			 * is probably not configured :(
			 */
			panic("VECTORSPACING too small");
		}

		set_handler(((unsigned long)b - ebase), vec_start,
#ifdef CONFIG_CPU_MICROMIPS
				(handler_len - 1));
#else
				handler_len);
#endif
		h = (u16 *)(b + lui_offset);
		*h = (handler >> 16) & 0xffff;
		h = (u16 *)(b + ori_offset);
		*h = (handler & 0xffff);
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+handler_len));
	} else {
		/*
		 * In other cases jump directly to the interrupt handler. It
		 * is the handler's responsibility to save registers if required
		 * (eg hi/lo) and return from the exception using "eret".
		 */
		u32 insn;

		h = (u16 *)b;
		/* j handler */
#ifdef CONFIG_CPU_MICROMIPS
		insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
#else
		insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
#endif
		h[0] = (insn >> 16) & 0xffff;
		h[1] = insn & 0xffff;
		h[2] = 0;
		h[3] = 0;
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+8));
	}

	return (void *)old_handler;
}

void *set_vi_handler(int n, vi_handler_t addr)
{
	return set_vi_srs_handler(n, addr, 0);
}
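/*
 * Note on the direct-jump case above (descriptive, added here):
 * 0x08000000 is the encoding of a MIPS32 "j" instruction, and the
 * handler address is turned into its 26-bit word-index target field.
 * This is why the handler must live in the same 256 MB segment as the
 * vector itself.
 */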
extern void tlb_init(void);

/*
 * Timer interrupt
 */
int cp0_compare_irq;
EXPORT_SYMBOL_GPL(cp0_compare_irq);
int cp0_compare_irq_shift;

/*
 * Performance counter IRQ or -1 if shared with timer
 */
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);

/*
 * Fast debug channel IRQ or -1 if not present
 */
int cp0_fdc_irq;
EXPORT_SYMBOL_GPL(cp0_fdc_irq);
static int noulri;

static int __init ulri_disable(char *s)
{
	pr_info("Disabling ulri\n");
	noulri = 1;

	return 1;
}
__setup("noulri", ulri_disable);
/* configure STATUS register */
static void configure_status(void)
{
	/*
	 * Disable coprocessors and select 32-bit or 64-bit addressing
	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
	 * flag that some firmware may have left set and the TS bit (for
	 * IP27).  Set XX for ISA IV code to work.
	 */
	unsigned int status_set = ST0_CU0;
#ifdef CONFIG_64BIT
	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
	if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
		status_set |= ST0_XX;
	if (cpu_has_dsp)
		status_set |= ST0_MX;

	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
			 status_set);
}
/* configure HWRENA register */
static void configure_hwrena(void)
{
	unsigned int hwrena = cpu_hwrena_impl_bits;

	if (cpu_has_mips_r2_r6)
		hwrena |= 0x0000000f;

	if (!noulri && cpu_has_userlocal)
		hwrena |= (1 << 29);

	if (hwrena)
		write_c0_hwrena(hwrena);
}
static void configure_exception_vector(void)
{
	if (cpu_has_veic || cpu_has_vint) {
		unsigned long sr = set_c0_status(ST0_BEV);
		write_c0_ebase(ebase);
		write_c0_status(sr);
		/* Setting vector spacing enables EI/VI mode */
		change_c0_intctl(0x3e0, VECTORSPACING);
	}
	if (cpu_has_divec) {
		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();
			set_c0_cause(CAUSEF_IV);
			evpe(vpflags);
		} else
			set_c0_cause(CAUSEF_IV);
	}
}
void per_cpu_trap_init(bool is_boot_cpu)
{
	unsigned int cpu = smp_processor_id();

	configure_status();
	configure_hwrena();

	configure_exception_vector();

	/*
	 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
	 *
	 *  o read IntCtl.IPTI to determine the timer interrupt
	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
	 *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
	 */
	if (cpu_has_mips_r2_r6) {
		/*
		 * We shouldn't trust a secondary core has a sane EBASE register
		 * so use the one calculated by the boot CPU.
		 */
		if (!is_boot_cpu)
			write_c0_ebase(ebase);

		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
		cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
		if (!cp0_fdc_irq)
			cp0_fdc_irq = -1;

	} else {
		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
		cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
		cp0_perfcount_irq = -1;
		cp0_fdc_irq = -1;
	}

	if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = asid_first_version(cpu);

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	/* Boot CPU's cache setup in setup_arch().  */
	if (!is_boot_cpu)
		cpu_cache_init();
	tlb_init();
	TLBMISS_HANDLER_SETUP();
}
/* Install CPU exception handler */
void set_handler(unsigned long offset, void *addr, unsigned long size)
{
#ifdef CONFIG_CPU_MICROMIPS
	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
#else
	memcpy((void *)(ebase + offset), addr, size);
#endif
	local_flush_icache_range(ebase + offset, ebase + offset + size);
}
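/*
 * Explanatory note added here: the "addr - 1" in the microMIPS case
 * above compensates for the ISA-mode bit that is set in microMIPS
 * function symbol addresses, so the copy starts at the real first
 * instruction of the handler.
 */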
static char panic_null_cerr[] =
	"Trying to set NULL cache error exception handler";

/*
 * Install uncached CPU exception handler.
 * This is suitable only for the cache error exception which is the only
 * exception handler that is being run uncached.
 */
void set_uncached_handler(unsigned long offset, void *addr,
	unsigned long size)
{
	unsigned long uncached_ebase = CKSEG1ADDR(ebase);

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_ebase + offset), addr, size);
}
static int __initdata rdhwr_noopt;
static int __init set_rdhwr_noopt(char *str)
{
	rdhwr_noopt = 1;
	return 1;
}
__setup("rdhwr_noopt", set_rdhwr_noopt);
void __init trap_init(void)
{
	extern char except_vec3_generic;
	extern char except_vec4;
	extern char except_vec3_r4000;
	unsigned long i;

	check_wait();

	if (cpu_has_veic || cpu_has_vint) {
		unsigned long size = 0x200 + VECTORSPACING*64;
		ebase = (unsigned long)
			__alloc_bootmem(size, 1 << fls(size), 0);
	} else {
		ebase = CKSEG0;
		if (cpu_has_mips_r2_r6)
			ebase += (read_c0_ebase() & 0x3ffff000);
	}

	if (cpu_has_mmips) {
		unsigned int config3 = read_c0_config3();

		if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
			write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
		else
			write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
	}

	if (board_ebase_setup)
		board_ebase_setup();
	per_cpu_trap_init(true);

	/*
	 * Copy the generic exception handlers to their final destination.
	 * This will be overridden later as suitable for a particular
	 * configuration.
	 */
	set_handler(0x180, &except_vec3_generic, 0x80);

	/*
	 * Setup default vectors
	 */
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);

	/*
	 * Copy the EJTAG debug exception vector handler code to its final
	 * destination.
	 */
	if (cpu_has_ejtag && board_ejtag_handler_setup)
		board_ejtag_handler_setup();

	/*
	 * Only some CPUs have the watch exceptions.
	 */
	if (cpu_has_watch)
		set_except_vector(EXCCODE_WATCH, handle_watch);

	/*
	 * Initialise interrupt handlers
	 */
	if (cpu_has_veic || cpu_has_vint) {
		int nvec = cpu_has_veic ? 64 : 8;
		for (i = 0; i < nvec; i++)
			set_vi_handler(i, NULL);
	}
	else if (cpu_has_divec)
		set_handler(0x200, &except_vec4, 0x8);

	/*
	 * Some CPUs can enable/disable cache parity detection, but they
	 * do it in different ways.
	 */
	parity_protection_init();

	/*
	 * The Data Bus Errors / Instruction Bus Errors are signaled
	 * by external hardware.  Therefore these two exceptions
	 * may have board specific handlers.
	 */
	if (board_be_init)
		board_be_init();

	set_except_vector(EXCCODE_INT, using_rollback_handler() ?
					rollback_handle_int : handle_int);
	set_except_vector(EXCCODE_MOD, handle_tlbm);
	set_except_vector(EXCCODE_TLBL, handle_tlbl);
	set_except_vector(EXCCODE_TLBS, handle_tlbs);

	set_except_vector(EXCCODE_ADEL, handle_adel);
	set_except_vector(EXCCODE_ADES, handle_ades);

	set_except_vector(EXCCODE_IBE, handle_ibe);
	set_except_vector(EXCCODE_DBE, handle_dbe);

	set_except_vector(EXCCODE_SYS, handle_sys);
	set_except_vector(EXCCODE_BP, handle_bp);
	set_except_vector(EXCCODE_RI, rdhwr_noopt ? handle_ri :
			  (cpu_has_vtag_icache ?
			   handle_ri_rdhwr_vivt : handle_ri_rdhwr));
	set_except_vector(EXCCODE_CPU, handle_cpu);
	set_except_vector(EXCCODE_OV, handle_ov);
	set_except_vector(EXCCODE_TR, handle_tr);
	set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);

	if (current_cpu_type() == CPU_R6000 ||
	    current_cpu_type() == CPU_R6000A) {
		/*
		 * The R6000 is the only R-series CPU that features a machine
		 * check exception (similar to the R4000 cache error) and
		 * unaligned ldc1/sdc1 exception.  The handlers have not been
		 * written yet.	 Well, anyway there is no R6000 machine on the
		 * current list of targets for Linux/MIPS.
		 * (Duh, crap, there is someone with a triple R6k machine)
		 */
		//set_except_vector(14, handle_mc);
		//set_except_vector(15, handle_ndc);
	}

	if (board_nmi_handler_setup)
		board_nmi_handler_setup();

	if (cpu_has_fpu && !cpu_has_nofpuex)
		set_except_vector(EXCCODE_FPE, handle_fpe);

	set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);

	if (cpu_has_rixiex) {
		set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
		set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
	}

	set_except_vector(EXCCODE_MSADIS, handle_msa);
	set_except_vector(EXCCODE_MDMX, handle_mdmx);

	if (cpu_has_mcheck)
		set_except_vector(EXCCODE_MCHECK, handle_mcheck);

	if (cpu_has_mipsmt)
		set_except_vector(EXCCODE_THREAD, handle_mt);

	set_except_vector(EXCCODE_DSPDIS, handle_dsp);

	if (board_cache_error_setup)
		board_cache_error_setup();

	if (cpu_has_vce)
		/* Special exception: R4[04]00 uses also the divec space. */
		set_handler(0x180, &except_vec3_r4000, 0x100);
	else if (cpu_has_4kex)
		set_handler(0x180, &except_vec3_generic, 0x80);
	else
		set_handler(0x080, &except_vec3_generic, 0x80);

	local_flush_icache_range(ebase, ebase + 0x400);

	sort_extable(__start___dbe_table, __stop___dbe_table);

	cu2_notifier(default_cu2_call, 0x80000000);	/* Run last  */
}
static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
			    void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		configure_status();
		configure_hwrena();
		configure_exception_vector();

		/* Restore register with CPU number for TLB handlers */
		TLBMISS_HANDLER_RESTORE();

		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block trap_pm_notifier_block = {
	.notifier_call = trap_pm_notifier,
};

static int __init trap_pm_init(void)
{
	return cpu_pm_register_notifier(&trap_pm_notifier_block);
}
arch_initcall(trap_pm_init);