MIPS: Fix BREAK code interpretation heuristics
arch/mips/kernel/traps.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
 * Copyright (C) 1995, 1996 Paul M. Antoine
 * Copyright (C) 1998 Ulf Carlsson
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/cpu_pm.h>
#include <linux/kexec.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/kdb.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cop2.h>
#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/idle.h>
#include <asm/mips-r2-to-r6-emul.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/watch.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
#include <asm/uasm.h>

extern void check_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern u32 handle_tlbl[];
extern u32 handle_tlbs[];
extern u32 handle_tlbm[];
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_vivt(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_msa_fpe(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_ftlb(void);
extern asmlinkage void handle_msa(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
extern void tlb_do_page_fault_0(void);

void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void);
void (*board_cache_error_setup)(void);

static void show_raw_backtrace(unsigned long reg29)
{
	unsigned long *sp = (unsigned long *)(reg29 & ~3);
	unsigned long addr;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	while (!kstack_end(sp)) {
		unsigned long __user *p =
			(unsigned long __user *)(unsigned long)sp++;
		if (__get_user(addr, p)) {
			printk(" (Bad stack address)");
			break;
		}
		if (__kernel_text_address(addr))
			print_ip_sym(addr);
	}
	printk("\n");
}

#ifdef CONFIG_KALLSYMS
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
	raw_show_trace = 1;
	return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#endif

static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
{
	unsigned long sp = regs->regs[29];
	unsigned long ra = regs->regs[31];
	unsigned long pc = regs->cp0_epc;

	if (!task)
		task = current;

	if (raw_show_trace || !__kernel_text_address(pc)) {
		show_raw_backtrace(sp);
		return;
	}
	printk("Call Trace:\n");
	do {
		print_ip_sym(pc);
		pc = unwind_stack(task, &sp, pc, &ra);
	} while (pc);
	printk("\n");
}

/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */
static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	long stackdata;
	int i;
	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];

	printk("Stack :");
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0))
			printk("\n	  ");
		if (i > 39) {
			printk(" ...");
			break;
		}

		if (__get_user(stackdata, sp++)) {
			printk(" (Bad stack address)");
			break;
		}

		printk(" %0*lx", field, stackdata);
		i++;
	}
	printk("\n");
	show_backtrace(task, regs);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	struct pt_regs regs;
	if (sp) {
		regs.regs[29] = (unsigned long)sp;
		regs.regs[31] = 0;
		regs.cp0_epc = 0;
	} else {
		if (task && task != current) {
			regs.regs[29] = task->thread.reg29;
			regs.regs[31] = 0;
			regs.cp0_epc = task->thread.reg31;
#ifdef CONFIG_KGDB_KDB
		} else if (atomic_read(&kgdb_active) != -1 &&
			   kdb_current_regs) {
			memcpy(&regs, kdb_current_regs, sizeof(regs));
#endif /* CONFIG_KGDB_KDB */
		} else {
			prepare_frametrace(&regs);
		}
	}
	show_stacktrace(task, &regs);
}

static void show_code(unsigned int __user *pc)
{
	long i;
	unsigned short __user *pc16 = NULL;

	printk("\nCode:");

	if ((unsigned long)pc & 1)
		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
	for(i = -3 ; i < 6 ; i++) {
		unsigned int insn;
		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
			printk(" (Bad address in epc)\n");
			break;
		}
		printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
	}
}

static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int cause = regs->cp0_cause;
	int i;

	show_regs_print_info(KERN_DEFAULT);

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d   :", i);
		if (i == 0)
			printk(" %0*lx", field, 0UL);
		else if (i == 26 || i == 27)
			printk(" %*s", field, "");
		else
			printk(" %0*lx", field, regs->regs[i]);

		i++;
		if ((i % 4) == 0)
			printk("\n");
	}

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	printk("Acx    : %0*lx\n", field, regs->acx);
#endif
	printk("Hi    : %0*lx\n", field, regs->hi);
	printk("Lo    : %0*lx\n", field, regs->lo);

	/*
	 * Saved cp0 registers
	 */
	printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
	       (void *) regs->cp0_epc);
	printk("    %s\n", print_tainted());
	printk("ra    : %0*lx %pS\n", field, regs->regs[31],
	       (void *) regs->regs[31]);

	printk("Status: %08x	", (uint32_t) regs->cp0_status);

	if (cpu_has_3kex) {
		if (regs->cp0_status & ST0_KUO)
			printk("KUo ");
		if (regs->cp0_status & ST0_IEO)
			printk("IEo ");
		if (regs->cp0_status & ST0_KUP)
			printk("KUp ");
		if (regs->cp0_status & ST0_IEP)
			printk("IEp ");
		if (regs->cp0_status & ST0_KUC)
			printk("KUc ");
		if (regs->cp0_status & ST0_IEC)
			printk("IEc ");
	} else if (cpu_has_4kex) {
		if (regs->cp0_status & ST0_KX)
			printk("KX ");
		if (regs->cp0_status & ST0_SX)
			printk("SX ");
		if (regs->cp0_status & ST0_UX)
			printk("UX ");
		switch (regs->cp0_status & ST0_KSU) {
		case KSU_USER:
			printk("USER ");
			break;
		case KSU_SUPERVISOR:
			printk("SUPERVISOR ");
			break;
		case KSU_KERNEL:
			printk("KERNEL ");
			break;
		default:
			printk("BAD_MODE ");
			break;
		}
		if (regs->cp0_status & ST0_ERL)
			printk("ERL ");
		if (regs->cp0_status & ST0_EXL)
			printk("EXL ");
		if (regs->cp0_status & ST0_IE)
			printk("IE ");
	}
	printk("\n");

	printk("Cause : %08x\n", cause);

	cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	if (1 <= cause && cause <= 5)
		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

	printk("PrId  : %08x (%s)\n", read_c0_prid(),
	       cpu_name_string());
}

/*
 * FIXME: really the generic show_regs should take a const pointer argument.
 */
void show_regs(struct pt_regs *regs)
{
	__show_regs((struct pt_regs *)regs);
}

void show_registers(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	mm_segment_t old_fs = get_fs();

	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
	       current->comm, current->pid, current_thread_info(), current,
	       field, current_thread_info()->tp_value);
	if (cpu_has_userlocal) {
		unsigned long tls;

		tls = read_c0_userlocal();
		if (tls != current_thread_info()->tp_value)
			printk("*HwTLS: %0*lx\n", field, tls);
	}

	if (!user_mode(regs))
		/* Necessary for getting the correct stack content */
		set_fs(KERNEL_DS);
	show_stacktrace(current, regs);
	show_code((unsigned int __user *) regs->cp0_epc);
	printk("\n");
	set_fs(old_fs);
}

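/*
 * Cause.ExcCode lives in bits 6:2 of the CP0 Cause register; this
 * extracts the 5-bit exception code that is passed to notify_die()
 * as the trap number.
 */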
static int regs_to_trapnr(struct pt_regs *regs)
{
	return (regs->cp0_cause >> 2) & 0x1f;
}

static DEFINE_RAW_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs)
{
	static int die_counter;
	int sig = SIGSEGV;

	oops_enter();

	if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs),
		       SIGSEGV) == NOTIFY_STOP)
		sig = 0;

	console_verbose();
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds");
		ssleep(5);
		panic("Fatal exception");
	}

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	do_exit(sig);
}

extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];

__asm__(
"	.section	__dbe_table, \"a\"\n"
"	.previous			\n");

/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
	if (!e)
		e = search_module_dbetables(addr);
	return e;
}

asmlinkage void do_be(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	const struct exception_table_entry *fixup = NULL;
	int data = regs->cp0_cause & 4;
	int action = MIPS_BE_FATAL;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	/* XXX For now.	 Fixme, this searches the wrong table ...  */
	if (data && !user_mode(regs))
		fixup = search_dbe_tables(exception_epc(regs));

	if (fixup)
		action = MIPS_BE_FIXUP;

	if (board_be_handler)
		action = board_be_handler(regs, fixup != NULL);

	switch (action) {
	case MIPS_BE_DISCARD:
		goto out;
	case MIPS_BE_FIXUP:
		if (fixup) {
			regs->cp0_epc = fixup->nextinsn;
			goto out;
		}
		break;
	default:
		break;
	}

	/*
	 * Assume it would be too dangerous to continue ...
	 */
	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
	       data ? "Data" : "Instruction",
	       field, regs->cp0_epc, field, regs->regs[31]);
	if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs),
		       SIGBUS) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Oops", regs);
	force_sig(SIGBUS, current);

out:
	exception_exit(prev_state);
}

/*
 * ll/sc, rdhwr, sync emulation
 */

#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b
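
/*
 * For illustration: "ll $t0, 4($a1)" assembles to 0xc0a80004; masking
 * with OPCODE yields the LL major opcode, BASE selects $a1 (register
 * 5), RT selects $t0 (register 8), and OFFSET carries the signed
 * 16-bit displacement that simulate_ll() below sign-extends by hand.
 */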

/* microMIPS definitions */
#define MM_POOL32A_FUNC	0xfc00ffff
#define MM_RDHWR	0x00006b3c
#define MM_RS		0x001f0000
#define MM_RT		0x03e00000

/*
 * The ll_bit is cleared by r*_switch.S
 */
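/*
 * Emulation protocol: simulate_ll() records ownership in ll_bit and
 * ll_task; an emulated ll from a different task clears the bit, and
 * simulate_sc() only performs the store (and returns 1 in rt) while
 * ll_bit is still set for the current task.  Clearing ll_bit on every
 * context switch keeps the pairing conservative, much as hardware is
 * free to fail an sc spuriously.
 */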

unsigned int ll_bit;
struct task_struct *ll_task;

static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long value, __user *vaddr;
	long offset;

	/*
	 * analyse the ll instruction that just caused a ri exception
	 * and put the referenced address into vaddr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

	if ((unsigned long)vaddr & 3)
		return SIGBUS;
	if (get_user(value, vaddr))
		return SIGSEGV;

	preempt_disable();

	if (ll_task == NULL || ll_task == current) {
		ll_bit = 1;
	} else {
		ll_bit = 0;
	}
	ll_task = current;

	preempt_enable();

	regs->regs[(opcode & RT) >> 16] = value;

	return 0;
}

static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long __user *vaddr;
	unsigned long reg;
	long offset;

	/*
	 * analyse the sc instruction that just caused a ri exception
	 * and put the referenced address into vaddr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
	reg = (opcode & RT) >> 16;

	if ((unsigned long)vaddr & 3)
		return SIGBUS;

	preempt_disable();

	if (ll_bit == 0 || ll_task != current) {
		regs->regs[reg] = 0;
		preempt_enable();
		return 0;
	}

	preempt_enable();

	if (put_user(regs->regs[reg], vaddr))
		return SIGSEGV;

	regs->regs[reg] = 1;

	return 0;
}

/*
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is both
 * opcodes are supposed to result in coprocessor unusable exceptions if
 * executed on ll/sc-less processors.  That's the theory.  In practice a
 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 * instead, so we're doing the emulation thing in both exception handlers.
 */
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == LL) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return simulate_ll(regs, opcode);
	}
	if ((opcode & OPCODE) == SC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return simulate_sc(regs, opcode);
	}

	return -1;			/* Must be something else ... */
}

/*
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware.
 */
static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
{
	struct thread_info *ti = task_thread_info(current);

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
			1, regs, 0);
	switch (rd) {
	case 0:		/* CPU number */
		regs->regs[rt] = smp_processor_id();
		return 0;
	case 1:		/* SYNCI length */
		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
				     current_cpu_data.icache.linesz);
		return 0;
	case 2:		/* Read count register */
		regs->regs[rt] = read_c0_count();
		return 0;
	case 3:		/* Count register resolution */
		switch (current_cpu_type()) {
		case CPU_20KC:
		case CPU_25KF:
			regs->regs[rt] = 1;
			break;
		default:
			regs->regs[rt] = 2;
		}
		return 0;
	case 29:
		regs->regs[rt] = ti->tp_value;
		return 0;
	default:
		return -1;
	}
}

static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
		int rd = (opcode & RD) >> 11;
		int rt = (opcode & RT) >> 16;

		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	/* Not ours.  */
	return -1;
}

static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode)
{
	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
		int rd = (opcode & MM_RS) >> 16;
		int rt = (opcode & MM_RT) >> 21;
		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	/* Not ours.  */
	return -1;
}

static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return 0;
	}

	return -1;			/* Must be something else ... */
}

asmlinkage void do_ov(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	siginfo_t info;

	prev_state = exception_enter();
	die_if_kernel("Integer overflow", regs);

	info.si_code = FPE_INTOVF;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *) regs->cp0_epc;
	force_sig_info(SIGFPE, &info, current);
	exception_exit(prev_state);
}

int process_fpemu_return(int sig, void __user *fault_addr)
{
	if (sig == SIGSEGV || sig == SIGBUS) {
		struct siginfo si = {0};
		si.si_addr = fault_addr;
		si.si_signo = sig;
		if (sig == SIGSEGV) {
			down_read(&current->mm->mmap_sem);
			if (find_vma(current->mm, (unsigned long)fault_addr))
				si.si_code = SEGV_ACCERR;
			else
				si.si_code = SEGV_MAPERR;
			up_read(&current->mm->mmap_sem);
		} else {
			si.si_code = BUS_ADRERR;
		}
		force_sig_info(sig, &si, current);
		return 1;
	} else if (sig) {
		force_sig(sig, current);
		return 1;
	} else {
		return 0;
	}
}

static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
		       unsigned long old_epc, unsigned long old_ra)
{
	union mips_instruction inst = { .word = opcode };
	void __user *fault_addr = NULL;
	int sig;

	/* If it's obviously not an FP instruction, skip it */
	switch (inst.i_format.opcode) {
	case cop1_op:
	case cop1x_op:
	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
		break;

	default:
		return -1;
	}

	/*
	 * do_ri skipped over the instruction via compute_return_epc, undo
	 * that for the FPU emulator.
	 */
	regs->cp0_epc = old_epc;
	regs->regs[31] = old_ra;

	/* Save the FP context to struct thread_struct */
	lose_fpu(1);

	/* Run the emulator */
	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
				       &fault_addr);

	/* If something went wrong, signal */
	process_fpemu_return(sig, fault_addr);

	/* Restore the hardware register state */
	own_fpu(1);

	return 0;
}

/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
	enum ctx_state prev_state;
	siginfo_t info = {0};

	prev_state = exception_enter();
	if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs),
		       SIGFPE) == NOTIFY_STOP)
		goto out;
	die_if_kernel("FP exception in kernel code", regs);

	if (fcr31 & FPU_CSR_UNI_X) {
		int sig;
		void __user *fault_addr = NULL;

		/*
		 * Unimplemented operation exception.  If we've got the full
		 * software emulator on-board, let's use it...
		 *
		 * Force FPU to dump state into task/thread context.  We're
		 * moving a lot of data here for what is probably a single
		 * instruction, but the alternative is to pre-decode the FP
		 * register operands before invoking the emulator, which seems
		 * a bit extreme for what should be an infrequent event.
		 */
		/* Ensure 'resume' does not overwrite the saved fp context again. */
		lose_fpu(1);

		/* Run the emulator */
		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);

		/*
		 * We can't allow the emulated instruction to leave any of
		 * the cause bits set in $fcr31.
		 */
		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

		/* Restore the hardware register state */
		own_fpu(1);	/* Using the FPU again.	 */

		/* If something went wrong, signal */
		process_fpemu_return(sig, fault_addr);

		goto out;
	} else if (fcr31 & FPU_CSR_INV_X)
		info.si_code = FPE_FLTINV;
	else if (fcr31 & FPU_CSR_DIV_X)
		info.si_code = FPE_FLTDIV;
	else if (fcr31 & FPU_CSR_OVF_X)
		info.si_code = FPE_FLTOVF;
	else if (fcr31 & FPU_CSR_UDF_X)
		info.si_code = FPE_FLTUND;
	else if (fcr31 & FPU_CSR_INE_X)
		info.si_code = FPE_FLTRES;
	else
		info.si_code = __SI_FAULT;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *) regs->cp0_epc;
	force_sig_info(SIGFPE, &info, current);

out:
	exception_exit(prev_state);
}

void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
	const char *str)
{
	siginfo_t info;
	char b[40];

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
		return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

	if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs),
		       SIGTRAP) == NOTIFY_STOP)
		return;

	/*
	 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
	 * insns, even for trap and break codes that indicate arithmetic
	 * failures.  Weird ...
	 * But should we continue the brokenness???  --macro
	 */
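	/*
	 * Note that compilers lean on these codes for runtime arithmetic
	 * checks: gcc, for example, typically guards integer division
	 * with a conditional trap or break carrying BRK_DIVZERO, which
	 * the case below converts to SIGFPE/FPE_INTDIV instead of a
	 * bare SIGTRAP.
	 */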
	switch (code) {
	case BRK_OVERFLOW:
	case BRK_DIVZERO:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (code == BRK_DIVZERO)
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void __user *) regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	case BRK_BUG:
		die_if_kernel("Kernel bug detected", regs);
		force_sig(SIGTRAP, current);
		break;
	case BRK_MEMU:
		/*
		 * This breakpoint code is used by the FPU emulator to retake
		 * control of the CPU after executing the instruction from the
		 * delay slot of an emulated branch.
		 *
		 * Terminate if exception was recognized as a delay slot return
		 * otherwise handle as normal.
		 */
		if (do_dsemulret(regs))
			return;

		die_if_kernel("Math emu break/trap", regs);
		force_sig(SIGTRAP, current);
		break;
	default:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		force_sig(SIGTRAP, current);
	}
}

asmlinkage void do_bp(struct pt_regs *regs)
{
	unsigned long epc = msk_isa16_mode(exception_epc(regs));
	unsigned int opcode, bcode;
	enum ctx_state prev_state;
	mm_segment_t seg;

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);

	prev_state = exception_enter();
	if (get_isa16_mode(regs->cp0_epc)) {
		u16 instr[2];

		if (__get_user(instr[0], (u16 __user *)epc))
			goto out_sigsegv;

		if (!cpu_has_mmips) {
			/* MIPS16e mode */
			bcode = (instr[0] >> 5) & 0x3f;
		} else if (mm_insn_16bit(instr[0])) {
			/* 16-bit microMIPS BREAK */
			bcode = instr[0] & 0xf;
		} else {
			/* 32-bit microMIPS BREAK */
			if (__get_user(instr[1], (u16 __user *)(epc + 2)))
				goto out_sigsegv;
			opcode = (instr[0] << 16) | instr[1];
			bcode = (opcode >> 6) & ((1 << 20) - 1);
		}
	} else {
		if (__get_user(opcode, (unsigned int __user *)epc))
			goto out_sigsegv;
		bcode = (opcode >> 6) & ((1 << 20) - 1);
	}

	/*
	 * There is an ancient bug in MIPS assemblers that placed the
	 * break code starting at bit 16 instead of bit 6 in the opcode.
	 * Gas is bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristic.  --macro
	 */
	if (bcode >= (1 << 10))
		bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);
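	/*
	 * Worked example: "break 7" assembled with the buggy convention
	 * places 7 at bit 16, so the 20-bit field extracted above reads
	 * 7 << 10 == 0x1c00; the swap recovers bcode == 7, while codes
	 * below 1024 that were placed correctly at bit 6 pass through
	 * unchanged.
	 */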

	/*
	 * notify the kprobe handlers, if instruction is likely to
	 * pertain to them.
	 */
	switch (bcode) {
	case BRK_KPROBE_BP:
		if (notify_die(DIE_BREAK, "debug", regs, bcode,
			       regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
			       regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	default:
		break;
	}

	do_trap_or_bp(regs, bcode, "Break");

out:
	set_fs(seg);
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
	goto out;
}

asmlinkage void do_tr(struct pt_regs *regs)
{
	u32 opcode, tcode = 0;
	enum ctx_state prev_state;
	u16 instr[2];
	mm_segment_t seg;
	unsigned long epc = msk_isa16_mode(exception_epc(regs));

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(get_ds());

	prev_state = exception_enter();
	if (get_isa16_mode(regs->cp0_epc)) {
		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
		    __get_user(instr[1], (u16 __user *)(epc + 2)))
			goto out_sigsegv;
		opcode = (instr[0] << 16) | instr[1];
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 12) & ((1 << 4) - 1);
	} else {
		if (__get_user(opcode, (u32 __user *)epc))
			goto out_sigsegv;
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 6) & ((1 << 10) - 1);
	}

	do_trap_or_bp(regs, tcode, "Trap");

out:
	set_fs(seg);
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
	goto out;
}

asmlinkage void do_ri(struct pt_regs *regs)
{
	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
	unsigned long old_epc = regs->cp0_epc;
	unsigned long old31 = regs->regs[31];
	enum ctx_state prev_state;
	unsigned int opcode = 0;
	int status = -1;

	/*
	 * Avoid any kernel code.  Just emulate the R2 instruction
	 * as quickly as possible.
	 */
	if (mipsr2_emulation && cpu_has_mips_r6 &&
	    likely(user_mode(regs)) &&
	    likely(get_user(opcode, epc) >= 0)) {
		status = mipsr2_decoder(regs, opcode);
		switch (status) {
		case 0:
		case SIGEMT:
			task_thread_info(current)->r2_emul_return = 1;
			return;
		case SIGILL:
			goto no_r2_instr;
		default:
			process_fpemu_return(status,
					     &current->thread.cp0_baduaddr);
			task_thread_info(current)->r2_emul_return = 1;
			return;
		}
	}

no_r2_instr:

	prev_state = exception_enter();

	if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs),
		       SIGILL) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(compute_return_epc(regs) < 0))
		goto out;

	if (get_isa16_mode(regs->cp0_epc)) {
		unsigned short mmop[2] = { 0 };

		if (unlikely(get_user(mmop[0], epc) < 0))
			status = SIGSEGV;
		if (unlikely(get_user(mmop[1], epc) < 0))
			status = SIGSEGV;
		opcode = (mmop[0] << 16) | mmop[1];

		if (status < 0)
			status = simulate_rdhwr_mm(regs, opcode);
	} else {
		if (unlikely(get_user(opcode, epc) < 0))
			status = SIGSEGV;

		if (!cpu_has_llsc && status < 0)
			status = simulate_llsc(regs, opcode);

		if (status < 0)
			status = simulate_rdhwr_normal(regs, opcode);

		if (status < 0)
			status = simulate_sync(regs, opcode);

		if (status < 0)
			status = simulate_fp(regs, opcode, old_epc, old31);
	}

	if (status < 0)
		status = SIGILL;

	if (unlikely(status > 0)) {
		regs->cp0_epc = old_epc;		/* Undo skip-over.  */
		regs->regs[31] = old31;
		force_sig(status, current);
	}

out:
	exception_exit(prev_state);
}

/*
 * MIPS MT processors may have fewer FPU contexts than CPU threads.  If we've
 * emulated more than some threshold number of instructions, force migration to
 * a "CPU" that has FP support.
 */
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
	if (mt_fpemul_threshold > 0 &&
	    ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
		/*
		 * If there's no FPU present, or if the application has already
		 * restricted the allowed set to exclude any CPUs with FPUs,
		 * we'll skip the procedure.
		 */
		if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
			cpumask_t tmask;

			current->thread.user_cpus_allowed
				= current->cpus_allowed;
			cpus_and(tmask, current->cpus_allowed,
				 mt_fpu_cpumask);
			set_cpus_allowed_ptr(current, &tmask);
			set_thread_flag(TIF_FPUBOUND);
		}
	}
#endif /* CONFIG_MIPS_MT_FPAFF */
}

/*
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(cu2_chain);

int __ref register_cu2_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cu2_chain, nb);
}

int cu2_notifier_call_chain(unsigned long val, void *v)
{
	return raw_notifier_call_chain(&cu2_chain, val, v);
}

static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
	void *data)
{
	struct pt_regs *regs = data;

	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
			      "instruction", regs);
	force_sig(SIGILL, current);

	return NOTIFY_OK;
}

static int wait_on_fp_mode_switch(atomic_t *p)
{
	/*
	 * The FP mode for this task is currently being switched.  That may
	 * involve modifications to the format of this task's FP context which
	 * make it unsafe to proceed with execution for the moment.  Instead,
	 * schedule some other task.
	 */
	schedule();
	return 0;
}

static int enable_restore_fp_context(int msa)
{
	int err, was_fpu_owner, prior_msa;

	/*
	 * If an FP mode switch is currently underway, wait for it to
	 * complete before proceeding.
	 */
	wait_on_atomic_t(&current->mm->context.fp_mode_switching,
			 wait_on_fp_mode_switch, TASK_KILLABLE);

	if (!used_math()) {
		/* First time FP context user. */
		preempt_disable();
		err = init_fpu();
		if (msa && !err) {
			enable_msa();
			_init_msa_upper();
			set_thread_flag(TIF_USEDMSA);
			set_thread_flag(TIF_MSA_CTX_LIVE);
		}
		preempt_enable();
		if (!err)
			set_used_math();
		return err;
	}

	/*
	 * This task has formerly used the FP context.
	 *
	 * If this thread has no live MSA vector context then we can simply
	 * restore the scalar FP context.  If it has live MSA vector context
	 * (that is, it has or may have used MSA since last performing a
	 * function call) then we'll need to restore the vector context.  This
	 * applies even if we're currently only executing a scalar FP
	 * instruction.  This is because if we were to later execute an MSA
	 * instruction then we'd either have to:
	 *
	 *  - Restore the vector context & clobber any registers modified by
	 *    scalar FP instructions between now & then.
	 *
	 * or
	 *
	 *  - Not restore the vector context & lose the most significant bits
	 *    of all vector registers.
	 *
	 * Neither of those options is acceptable.  We cannot restore the least
	 * significant bits of the registers now & only restore the most
	 * significant bits later because the most significant bits of any
	 * vector registers whose aliased FP register is modified now will have
	 * been zeroed.  We'd have no way to know that when restoring the vector
	 * context & thus may load an outdated value for the most significant
	 * bits of a vector register.
	 */
	if (!msa && !thread_msa_context_live())
		return own_fpu(1);

	/*
	 * This task is using or has previously used MSA.  Thus we require
	 * that Status.FR == 1.
	 */
	preempt_disable();
	was_fpu_owner = is_fpu_owner();
	err = own_fpu_inatomic(0);
	if (err)
		goto out;

	enable_msa();
	write_msa_csr(current->thread.fpu.msacsr);
	set_thread_flag(TIF_USEDMSA);

	/*
	 * If this is the first time that the task is using MSA and it has
	 * previously used scalar FP in this time slice then we already have
	 * FP context which we shouldn't clobber.  We do however need to clear
	 * the upper 64b of each vector register so that this task has no
	 * opportunity to see data left behind by another.
	 */
	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
	if (!prior_msa && was_fpu_owner) {
		_init_msa_upper();

		goto out;
	}

	if (!prior_msa) {
		/*
		 * Restore the least significant 64b of each vector register
		 * from the existing scalar FP context.
		 */
		_restore_fp(current);

		/*
		 * The task has not formerly used MSA, so clear the upper 64b
		 * of each vector register such that it cannot see data left
		 * behind by another task.
		 */
		_init_msa_upper();
	} else {
		/* We need to restore the vector context. */
		restore_msa(current);

		/* Restore the scalar FP control & status register */
		if (!was_fpu_owner)
			write_32bit_cp1_register(CP1_STATUS,
						 current->thread.fpu.fcr31);
	}

out:
	preempt_enable();

	return 0;
}

asmlinkage void do_cpu(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	unsigned int __user *epc;
	unsigned long old_epc, old31;
	unsigned int opcode;
	unsigned int cpid;
	int status, err;
	unsigned long __maybe_unused flags;

	prev_state = exception_enter();
	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

	if (cpid != 2)
		die_if_kernel("do_cpu invoked from kernel context!", regs);

	switch (cpid) {
	case 0:
		epc = (unsigned int __user *)exception_epc(regs);
		old_epc = regs->cp0_epc;
		old31 = regs->regs[31];
		opcode = 0;
		status = -1;

		if (unlikely(compute_return_epc(regs) < 0))
			break;

		if (get_isa16_mode(regs->cp0_epc)) {
			unsigned short mmop[2] = { 0 };

			if (unlikely(get_user(mmop[0], epc) < 0))
				status = SIGSEGV;
			if (unlikely(get_user(mmop[1], epc) < 0))
				status = SIGSEGV;
			opcode = (mmop[0] << 16) | mmop[1];

			if (status < 0)
				status = simulate_rdhwr_mm(regs, opcode);
		} else {
			if (unlikely(get_user(opcode, epc) < 0))
				status = SIGSEGV;

			if (!cpu_has_llsc && status < 0)
				status = simulate_llsc(regs, opcode);

			if (status < 0)
				status = simulate_rdhwr_normal(regs, opcode);
		}

		if (status < 0)
			status = SIGILL;

		if (unlikely(status > 0)) {
			regs->cp0_epc = old_epc;	/* Undo skip-over.  */
			regs->regs[31] = old31;
			force_sig(status, current);
		}

		break;

	case 3:
		/*
		 * Old (MIPS I and MIPS II) processors will set this code
		 * for COP1X opcode instructions that replaced the original
		 * COP3 space.  We don't limit COP1 space instructions in
		 * the emulator according to the CPU ISA, so we want to
		 * treat COP1X instructions consistently regardless of which
		 * code the CPU chose.  Therefore we redirect this trap to
		 * the FP emulator too.
		 *
		 * Then some newer FPU-less processors use this code
		 * erroneously too, so they are covered by this choice
		 * as well.
		 */
		if (raw_cpu_has_fpu) {
			force_sig(SIGILL, current);
			break;
		}
		/* Fall through.  */

	case 1:
		err = enable_restore_fp_context(0);

		if (!raw_cpu_has_fpu || err) {
			int sig;
			void __user *fault_addr = NULL;
			sig = fpu_emulator_cop1Handler(regs,
						       &current->thread.fpu,
						       0, &fault_addr);
			if (!process_fpemu_return(sig, fault_addr) && !err)
				mt_ase_fp_affinity();
		}

		break;

	case 2:
		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
		break;
	}

	exception_exit(prev_state);
}

asmlinkage void do_msa_fpe(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
	force_sig(SIGFPE, current);
	exception_exit(prev_state);
}

asmlinkage void do_msa(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	int err;

	prev_state = exception_enter();

	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
		force_sig(SIGILL, current);
		goto out;
	}

	die_if_kernel("do_msa invoked from kernel context!", regs);

	err = enable_restore_fp_context(1);
	if (err)
		force_sig(SIGILL, current);
out:
	exception_exit(prev_state);
}

asmlinkage void do_mdmx(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	force_sig(SIGILL, current);
	exception_exit(prev_state);
}

/*
 * Called with interrupts disabled.
 */
asmlinkage void do_watch(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	u32 cause;

	prev_state = exception_enter();
	/*
	 * Clear WP (bit 22) bit of cause register so we don't loop
	 * forever.
	 */
	cause = read_c0_cause();
	cause &= ~(1 << 22);
	write_c0_cause(cause);

	/*
	 * If the current thread has the watch registers loaded, save
	 * their values and send SIGTRAP.  Otherwise another thread
	 * left the registers set, clear them and continue.
	 */
	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
		mips_read_watch_registers();
		local_irq_enable();
		force_sig(SIGTRAP, current);
	} else {
		mips_clear_watch_registers();
		local_irq_enable();
	}
	exception_exit(prev_state);
}

asmlinkage void do_mcheck(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	int multi_match = regs->cp0_status & ST0_TS;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	show_regs(regs);

	if (multi_match) {
		pr_err("Index   : %0x\n", read_c0_index());
		pr_err("Pagemask: %0x\n", read_c0_pagemask());
		pr_err("EntryHi : %0*lx\n", field, read_c0_entryhi());
		pr_err("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
		pr_err("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
		pr_err("Wired   : %0x\n", read_c0_wired());
		pr_err("Pagegrain: %0x\n", read_c0_pagegrain());
		if (cpu_has_htw) {
			pr_err("PWField : %0*lx\n", field, read_c0_pwfield());
			pr_err("PWSize  : %0*lx\n", field, read_c0_pwsize());
			pr_err("PWCtl   : %0x\n", read_c0_pwctl());
		}
		pr_err("\n");
		dump_tlb_all();
	}

	show_code((unsigned int __user *) regs->cp0_epc);

	/*
	 * Some chips may have other causes of machine check (e.g. SB1
	 * graduation timer)
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (multi_match) ? "" : "not ");
}

asmlinkage void do_mt(struct pt_regs *regs)
{
	int subcode;

	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
			>> VPECONTROL_EXCPT_SHIFT;
	switch (subcode) {
	case 0:
		printk(KERN_DEBUG "Thread Underflow\n");
		break;
	case 1:
		printk(KERN_DEBUG "Thread Overflow\n");
		break;
	case 2:
		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
		break;
	case 3:
		printk(KERN_DEBUG "Gating Storage Exception\n");
		break;
	case 4:
		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
		break;
	case 5:
		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
		break;
	default:
		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
			subcode);
		break;
	}
	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	force_sig(SIGILL, current);
}


asmlinkage void do_dsp(struct pt_regs *regs)
{
	if (cpu_has_dsp)
		panic("Unexpected DSP exception");

	force_sig(SIGILL, current);
}

asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - no way to handle this if it ever occurs.	 Most probably
	 * caused by a new unknown cpu type or after another deadly
	 * hardware/software error.
	 */
	show_regs(regs);
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
}

static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
	l1parity = 0;
	return 1;
}
__setup("nol1par", nol1parity);
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
	l2parity = 0;
	return 1;
}
__setup("nol2par", nol2parity);

/*
 * Some MIPS CPUs can enable/disable cache parity detection, but they
 * do it in different ways.
 */
static inline void parity_protection_init(void)
{
	switch (current_cpu_type()) {
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
	case CPU_P5600:
	case CPU_QEMU_GENERIC:
		{
#define ERRCTL_PE	0x80000000
#define ERRCTL_L2P	0x00800000
		unsigned long errctl;
		unsigned int l1parity_present, l2parity_present;

		errctl = read_c0_ecc();
		errctl &= ~(ERRCTL_PE|ERRCTL_L2P);

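		/*
		 * Probe for support by writing each enable bit and
		 * reading it back; a bit that does not stick is not
		 * implemented on this core.
		 */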
		/* probe L1 parity support */
		write_c0_ecc(errctl | ERRCTL_PE);
		back_to_back_c0_hazard();
		l1parity_present = (read_c0_ecc() & ERRCTL_PE);

		/* probe L2 parity support */
		write_c0_ecc(errctl|ERRCTL_L2P);
		back_to_back_c0_hazard();
		l2parity_present = (read_c0_ecc() & ERRCTL_L2P);

		if (l1parity_present && l2parity_present) {
			if (l1parity)
				errctl |= ERRCTL_PE;
			if (l1parity ^ l2parity)
				errctl |= ERRCTL_L2P;
		} else if (l1parity_present) {
			if (l1parity)
				errctl |= ERRCTL_PE;
		} else if (l2parity_present) {
			if (l2parity)
				errctl |= ERRCTL_L2P;
		} else {
			/* No parity available */
		}

		printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);

		write_c0_ecc(errctl);
		back_to_back_c0_hazard();
		errctl = read_c0_ecc();
		printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);

		if (l1parity_present)
			printk(KERN_INFO "Cache parity protection %sabled\n",
			       (errctl & ERRCTL_PE) ? "en" : "dis");

		if (l2parity_present) {
			if (l1parity_present && l1parity)
				errctl ^= ERRCTL_L2P;
			printk(KERN_INFO "L2 cache parity protection %sabled\n",
			       (errctl & ERRCTL_L2P) ? "en" : "dis");
		}
		}
		break;

	case CPU_5KC:
	case CPU_5KE:
	case CPU_LOONGSON1:
		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}

asmlinkage void cache_parity_error(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	if ((cpu_has_mips_r2_r6) &&
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<27) ? "ES " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	} else {
		pr_err("Error bits: %s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	}
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
}

asmlinkage void do_ftlb(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	if ((cpu_has_mips_r2_r6) &&
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
		       read_c0_ecc());
		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
		reg_val = read_c0_cacheerr();
		pr_err("c0_cacheerr == %08x\n", reg_val);

		if ((reg_val & 0xc0000000) == 0xc0000000) {
			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
		} else {
			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
			       reg_val & (1<<30) ? "secondary" : "primary",
			       reg_val & (1<<31) ? "data" : "insn");
		}
	} else {
		pr_err("FTLB error exception\n");
	}
	/* Just print the cacheerr bits for now */
	cache_parity_error();
}

/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long depc, old_epc, old_ra;
	unsigned int debug;

	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC).  EPC is restored after the
		 * calculation.
		 */
		old_epc = regs->cp0_epc;
		old_ra = regs->regs[31];
		regs->cp0_epc = depc;
		compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
		regs->regs[31] = old_ra;
	} else
		depc += 4;
	write_c0_depc(depc);

#if 0
	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
#endif
}

/*
 * NMI exception handler.
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(nmi_chain);

int register_nmi_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&nmi_chain, nb);
}

void __noreturn nmi_exception_handler(struct pt_regs *regs)
{
	char str[100];

	raw_notifier_call_chain(&nmi_chain, 0, regs);
	bust_spinlocks(1);
	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
		 smp_processor_id(), regs->cp0_epc);
	regs->cp0_epc = read_c0_errorepc();
	die(str, regs);
}

#define VECTORSPACING 0x100	/* for EI/VI mode */
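/*
 * In EI/VI mode each vectored interrupt gets its own 256-byte stub,
 * laid out from ebase + 0x200 upwards; see set_vi_srs_handler() below.
 */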

unsigned long ebase;
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];

void __init *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler;

#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * Only the TLB handlers are cache aligned with an even
	 * address.  All other handlers are on an odd address and
	 * require no modification.  Otherwise, MIPS32 mode will
	 * be entered when handling any TLB exceptions.  That
	 * would be bad... since we must stay in microMIPS mode.
	 */
	if (!(handler & 0x1))
		handler |= 1;
#endif
	old_handler = xchg(&exception_handlers[n], handler);

	if (n == 0 && cpu_has_divec) {
#ifdef CONFIG_CPU_MICROMIPS
		unsigned long jump_mask = ~((1 << 27) - 1);
#else
		unsigned long jump_mask = ~((1 << 28) - 1);
#endif
FF
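		/*
		 * Cause.IV sends interrupts to ebase + 0x200, so plant a
		 * trampoline there: a direct "j" if the handler lies in the
		 * same jump segment, otherwise "la k0, handler; jr k0".
		 */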
1832 u32 *buf = (u32 *)(ebase + 0x200);
1833 unsigned int k0 = 26;
1834 if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
1835 uasm_i_j(&buf, handler & ~jump_mask);
1836 uasm_i_nop(&buf);
1837 } else {
1838 UASM_i_LA(&buf, k0, handler);
1839 uasm_i_jr(&buf, k0);
1840 uasm_i_nop(&buf);
1841 }
1842 local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
e01402b1
RB
1843 }
1844 return (void *)old_handler;
1845}
1846
86a1708a 1847static void do_default_vi(void)
6ba07e59
AN
1848{
1849 show_regs(get_irq_regs());
1850 panic("Caught unexpected vectored interrupt.");
1851}
1852
ef300e42 1853static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
e01402b1
RB
1854{
1855 unsigned long handler;
1856 unsigned long old_handler = vi_handlers[n];
f6771dbb 1857 int srssets = current_cpu_data.srsets;
2a0b24f5 1858 u16 *h;
e01402b1
RB
1859 unsigned char *b;
1860
b72b7092 1861 BUG_ON(!cpu_has_veic && !cpu_has_vint);
e01402b1
RB
1862
1863 if (addr == NULL) {
1864 handler = (unsigned long) do_default_vi;
1865 srs = 0;
41c594ab 1866 } else
e01402b1 1867 handler = (unsigned long) addr;
2a0b24f5 1868 vi_handlers[n] = handler;
e01402b1
RB
1869
1870 b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
1871
f6771dbb 1872 if (srs >= srssets)
e01402b1
RB
1873 panic("Shadow register set %d not supported", srs);
1874
1875 if (cpu_has_veic) {
1876 if (board_bind_eic_interrupt)
49a89efb 1877 board_bind_eic_interrupt(n, srs);
41c594ab 1878 } else if (cpu_has_vint) {
e01402b1 1879 /* SRSMap is only defined if shadow sets are implemented */
f6771dbb 1880 if (srssets > 1)
49a89efb 1881 change_c0_srsmap(0xf << n*4, srs << n*4);
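			/* SRSMap has a 4-bit shadow set field per vector number */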
e01402b1
RB
1882 }
1883
1884 if (srs == 0) {
1885 /*
1886 * If no shadow set is selected, use the default handler
2a0b24f5 1887 * that does normal register saving and a standard interrupt exit.
e01402b1 1888 */
e01402b1
RB
1889 extern char except_vec_vi, except_vec_vi_lui;
1890 extern char except_vec_vi_ori, except_vec_vi_end;
c65a5480 1891 extern char rollback_except_vec_vi;
f94d9a8e 1892 char *vec_start = using_rollback_handler() ?
c65a5480 1893 &rollback_except_vec_vi : &except_vec_vi;
2a0b24f5
SH
1894#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
1895 const int lui_offset = &except_vec_vi_lui - vec_start + 2;
1896 const int ori_offset = &except_vec_vi_ori - vec_start + 2;
1897#else
c65a5480
AN
1898 const int lui_offset = &except_vec_vi_lui - vec_start;
1899 const int ori_offset = &except_vec_vi_ori - vec_start;
2a0b24f5
SH
1900#endif
1901 const int handler_len = &except_vec_vi_end - vec_start;
e01402b1
RB
1902
1903 if (handler_len > VECTORSPACING) {
1904 /*
 1905 * Sigh... panicking won't help as the console
1906 * is probably not configured :(
1907 */
49a89efb 1908 panic("VECTORSPACING too small");
e01402b1
RB
1909 }
1910
2a0b24f5
SH
1911 set_handler(((unsigned long)b - ebase), vec_start,
1912#ifdef CONFIG_CPU_MICROMIPS
1913 (handler_len - 1));
1914#else
1915 handler_len);
1916#endif
2a0b24f5
SH
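		/*
		 * Patch the 16-bit immediates of the stub's lui/ori pair
		 * with the high and low halfwords of the handler address.
		 */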
1917 h = (u16 *)(b + lui_offset);
1918 *h = (handler >> 16) & 0xffff;
1919 h = (u16 *)(b + ori_offset);
1920 *h = (handler & 0xffff);
e0cee3ee
TB
1921 local_flush_icache_range((unsigned long)b,
1922 (unsigned long)(b+handler_len));
e01402b1
RB
1923 }
1924 else {
1925 /*
2a0b24f5
SH
 1926 * In other cases, jump directly to the interrupt handler. It
 1927 * is the handler's responsibility to save registers if required
 1928 * (e.g. hi/lo) and to return from the exception using "eret".
e01402b1 1929 */
2a0b24f5
SH
1930 u32 insn;
1931
1932 h = (u16 *)b;
1933 /* j handler */
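		/*
		 * "j" encodings: classic MIPS is 0x08000000 with target
		 * bits 27:2 in the index field; microMIPS J32 is 0xd4000000
		 * with target bits 26:1 (halfword aligned).
		 */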
1934#ifdef CONFIG_CPU_MICROMIPS
1935 insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
1936#else
1937 insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
1938#endif
1939 h[0] = (insn >> 16) & 0xffff;
1940 h[1] = insn & 0xffff;
1941 h[2] = 0;
1942 h[3] = 0;
e0cee3ee
TB
1943 local_flush_icache_range((unsigned long)b,
1944 (unsigned long)(b+8));
1da177e4 1945 }
e01402b1 1946
1da177e4
LT
1947 return (void *)old_handler;
1948}
1949
ef300e42 1950void *set_vi_handler(int n, vi_handler_t addr)
e01402b1 1951{
ff3eab2a 1952 return set_vi_srs_handler(n, addr, 0);
e01402b1 1953}
f41ae0b2 1954
1da177e4
LT
1955extern void tlb_init(void);
1956
42f77542
RB
1957/*
1958 * Timer interrupt
1959 */
1960int cp0_compare_irq;
68b6352c 1961EXPORT_SYMBOL_GPL(cp0_compare_irq);
010c108d 1962int cp0_compare_irq_shift;
42f77542
RB
1963
1964/*
1965 * Performance counter IRQ or -1 if shared with timer
1966 */
1967int cp0_perfcount_irq;
1968EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
1969
8f7ff027
JH
1970/*
1971 * Fast debug channel IRQ or -1 if not present
1972 */
1973int cp0_fdc_irq;
1974EXPORT_SYMBOL_GPL(cp0_fdc_irq);
1975
078a55fc 1976static int noulri;
bdc94eb4
CD
1977
1978static int __init ulri_disable(char *s)
1979{
1980 pr_info("Disabling ulri\n");
1981 noulri = 1;
1982
1983 return 1;
1984}
1985__setup("noulri", ulri_disable);
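/* Booting with "noulri" on the kernel command line keeps UserLocal out of
   HWRENA, forcing user rdhwr $29 accesses through the emulation path. */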
1986
ae4ce454
JH
1987/* configure STATUS register */
1988static void configure_status(void)
1da177e4 1989{
1da177e4
LT
1990 /*
1991 * Disable coprocessors and select 32-bit or 64-bit addressing
1992 * and the 16/32 or 32/32 FPR register model. Reset the BEV
1993 * flag that some firmware may have left set and the TS bit (for
1994 * IP27). Set XX for ISA IV code to work.
1995 */
ae4ce454 1996 unsigned int status_set = ST0_CU0;
875d43e7 1997#ifdef CONFIG_64BIT
1da177e4
LT
1998 status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
1999#endif
adb37892 2000 if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
1da177e4 2001 status_set |= ST0_XX;
bbaf238b
CD
2002 if (cpu_has_dsp)
2003 status_set |= ST0_MX;
2004
b38c7399 2005 change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
1da177e4 2006 status_set);
ae4ce454
JH
2007}
2008
2009/* configure HWRENA register */
2010static void configure_hwrena(void)
2011{
2012 unsigned int hwrena = cpu_hwrena_impl_bits;
1da177e4 2013
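	/*
	 * Bits 3:0 enable RDHWR access to CPUNum, SYNCI_Step, CC and CCRes;
	 * bit 29 exposes the UserLocal register (unless "noulri" was given).
	 */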
9c7d5768 2014 if (cpu_has_mips_r2_r6)
18d693b3 2015 hwrena |= 0x0000000f;
a3692020 2016
18d693b3
KC
2017 if (!noulri && cpu_has_userlocal)
2018 hwrena |= (1 << 29);
a3692020 2019
18d693b3
KC
2020 if (hwrena)
2021 write_c0_hwrena(hwrena);
ae4ce454 2022}
e01402b1 2023
ae4ce454
JH
2024static void configure_exception_vector(void)
2025{
e01402b1 2026 if (cpu_has_veic || cpu_has_vint) {
9fb4c2b9 2027 unsigned long sr = set_c0_status(ST0_BEV);
49a89efb 2028 write_c0_ebase(ebase);
9fb4c2b9 2029 write_c0_status(sr);
e01402b1 2030 /* Setting vector spacing enables EI/VI mode */
49a89efb 2031 change_c0_intctl(0x3e0, VECTORSPACING);
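		/* IntCtl.VS (bits 9:5) is now 8, i.e. 8 * 32 = 256-byte spacing */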
e01402b1 2032 }
d03d0a57
RB
2033 if (cpu_has_divec) {
2034 if (cpu_has_mipsmt) {
2035 unsigned int vpflags = dvpe();
2036 set_c0_cause(CAUSEF_IV);
2037 evpe(vpflags);
2038 } else
2039 set_c0_cause(CAUSEF_IV);
2040 }
ae4ce454
JH
2041}
2042
2043void per_cpu_trap_init(bool is_boot_cpu)
2044{
2045 unsigned int cpu = smp_processor_id();
ae4ce454
JH
2046
2047 configure_status();
2048 configure_hwrena();
2049
ae4ce454 2050 configure_exception_vector();
3b1d4ed5
RB
2051
2052 /*
2053 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
2054 *
2055 * o read IntCtl.IPTI to determine the timer interrupt
2056 * o read IntCtl.IPPCI to determine the performance counter interrupt
8f7ff027 2057 * o read IntCtl.IPFDC to determine the fast debug channel interrupt
3b1d4ed5 2058 */
9c7d5768 2059 if (cpu_has_mips_r2_r6) {
010c108d
DV
2060 cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
2061 cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
2062 cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
8f7ff027
JH
2063 cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
2064 if (!cp0_fdc_irq)
2065 cp0_fdc_irq = -1;
2066
c3e838a2
CD
2067 } else {
2068 cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
c6a4ebb9 2069 cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
c3e838a2 2070 cp0_perfcount_irq = -1;
8f7ff027 2071 cp0_fdc_irq = -1;
3b1d4ed5
RB
2072 }
2073
48c4ac97
DD
2074 if (!cpu_data[cpu].asid_cache)
2075 cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
1da177e4
LT
2076
2077 atomic_inc(&init_mm.mm_count);
2078 current->active_mm = &init_mm;
2079 BUG_ON(current->mm);
2080 enter_lazy_tlb(&init_mm, current);
2081
6650df3c
DD
2082 /* Boot CPU's cache setup in setup_arch(). */
2083 if (!is_boot_cpu)
2084 cpu_cache_init();
41c594ab 2085 tlb_init();
3d8bfdd0 2086 TLBMISS_HANDLER_SETUP();
1da177e4
LT
2087}
2088
e01402b1 2089/* Install CPU exception handler */
078a55fc 2090void set_handler(unsigned long offset, void *addr, unsigned long size)
e01402b1 2091{
2a0b24f5
SH
2092#ifdef CONFIG_CPU_MICROMIPS
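	/* microMIPS handler symbols carry the ISA bit (bit 0); strip it to
	   reach the actual code bytes. */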
2093 memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
2094#else
e01402b1 2095 memcpy((void *)(ebase + offset), addr, size);
2a0b24f5 2096#endif
e0cee3ee 2097 local_flush_icache_range(ebase + offset, ebase + offset + size);
e01402b1
RB
2098}
2099
078a55fc 2100static char panic_null_cerr[] =
641e97f3
RB
2101 "Trying to set NULL cache error exception handler";
2102
42fe7ee3
RB
2103/*
2104 * Install uncached CPU exception handler.
 2105 * This is suitable only for the cache error exception, which is the
 2106 * only exception handler that runs uncached.
2107 */
078a55fc 2108void set_uncached_handler(unsigned long offset, void *addr,
234fcd14 2109 unsigned long size)
e01402b1 2110{
4f81b01a 2111 unsigned long uncached_ebase = CKSEG1ADDR(ebase);
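	/* CKSEG1 aliases the same physical addresses as (C)KSEG0, uncached */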
e01402b1 2112
641e97f3
RB
2113 if (!addr)
2114 panic(panic_null_cerr);
2115
e01402b1
RB
2116 memcpy((void *)(uncached_ebase + offset), addr, size);
2117}
2118
5b10496b
AN
2119static int __initdata rdhwr_noopt;
2120static int __init set_rdhwr_noopt(char *str)
2121{
2122 rdhwr_noopt = 1;
2123 return 1;
2124}
2125
2126__setup("rdhwr_noopt", set_rdhwr_noopt);
2127
1da177e4
LT
2128void __init trap_init(void)
2129{
2a0b24f5 2130 extern char except_vec3_generic;
1da177e4 2131 extern char except_vec4;
2a0b24f5 2132 extern char except_vec3_r4000;
1da177e4 2133 unsigned long i;
c65a5480
AN
2134
2135 check_wait();
1da177e4 2136
88547001
JW
2137#if defined(CONFIG_KGDB)
2138 if (kgdb_early_setup)
70342287 2139 return; /* Already done */
88547001
JW
2140#endif
2141
9fb4c2b9
CD
2142 if (cpu_has_veic || cpu_has_vint) {
2143 unsigned long size = 0x200 + VECTORSPACING*64;
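		/* EI/VI mode: room for 64 spaced vectors; the alignment is
		   rounded up to the next power of two above the table size
		   so the result is a suitable exception base for EBase. */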
2144 ebase = (unsigned long)
2145 __alloc_bootmem(size, 1 << fls(size), 0);
2146 } else {
9843b030
SL
2147#ifdef CONFIG_KVM_GUEST
2148#define KVM_GUEST_KSEG0 0x40000000
2149 ebase = KVM_GUEST_KSEG0;
2150#else
2151 ebase = CKSEG0;
2152#endif
9c7d5768 2153 if (cpu_has_mips_r2_r6)
566f74f6
DD
2154 ebase += (read_c0_ebase() & 0x3ffff000);
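		/* R2 and later: EBase bits 29:12 hold the boot-time exception base */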
2155 }
e01402b1 2156
c6213c6c
SH
2157 if (cpu_has_mmips) {
2158 unsigned int config3 = read_c0_config3();
2159
2160 if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
2161 write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
2162 else
2163 write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
2164 }
2165
6fb97eff
KC
2166 if (board_ebase_setup)
2167 board_ebase_setup();
6650df3c 2168 per_cpu_trap_init(true);
1da177e4
LT
2169
2170 /*
2171 * Copy the generic exception handlers to their final destination.
 2172 * This will be overridden later as suitable for a particular
2173 * configuration.
2174 */
e01402b1 2175 set_handler(0x180, &except_vec3_generic, 0x80);
1da177e4
LT
2176
2177 /*
2178 * Setup default vectors
2179 */
2180 for (i = 0; i <= 31; i++)
2181 set_except_vector(i, handle_reserved);
2182
2183 /*
2184 * Copy the EJTAG debug exception vector handler code to it's final
2185 * destination.
2186 */
e01402b1 2187 if (cpu_has_ejtag && board_ejtag_handler_setup)
49a89efb 2188 board_ejtag_handler_setup();
1da177e4
LT
2189
2190 /*
2191 * Only some CPUs have the watch exceptions.
2192 */
2193 if (cpu_has_watch)
2194 set_except_vector(23, handle_watch);
2195
2196 /*
e01402b1 2197 * Initialise interrupt handlers
1da177e4 2198 */
e01402b1
RB
2199 if (cpu_has_veic || cpu_has_vint) {
2200 int nvec = cpu_has_veic ? 64 : 8;
2201 for (i = 0; i < nvec; i++)
ff3eab2a 2202 set_vi_handler(i, NULL);
e01402b1
RB
2203 }
2204 else if (cpu_has_divec)
2205 set_handler(0x200, &except_vec4, 0x8);
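	/* Cause.IV steers interrupts to the dedicated vector at ebase + 0x200 */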
1da177e4
LT
2206
2207 /*
2208 * Some CPUs can enable/disable for cache parity detection, but does
2209 * it different ways.
2210 */
2211 parity_protection_init();
2212
2213 /*
2214 * The Data Bus Errors / Instruction Bus Errors are signaled
2215 * by external hardware. Therefore these two exceptions
 2216 * may have board-specific handlers.
2217 */
2218 if (board_be_init)
2219 board_be_init();
2220
f94d9a8e
RB
2221 set_except_vector(0, using_rollback_handler() ? rollback_handle_int
2222 : handle_int);
1da177e4
LT
2223 set_except_vector(1, handle_tlbm);
2224 set_except_vector(2, handle_tlbl);
2225 set_except_vector(3, handle_tlbs);
2226
2227 set_except_vector(4, handle_adel);
2228 set_except_vector(5, handle_ades);
2229
2230 set_except_vector(6, handle_ibe);
2231 set_except_vector(7, handle_dbe);
2232
2233 set_except_vector(8, handle_sys);
2234 set_except_vector(9, handle_bp);
5b10496b
AN
2235 set_except_vector(10, rdhwr_noopt ? handle_ri :
2236 (cpu_has_vtag_icache ?
2237 handle_ri_rdhwr_vivt : handle_ri_rdhwr));
1da177e4
LT
2238 set_except_vector(11, handle_cpu);
2239 set_except_vector(12, handle_ov);
2240 set_except_vector(13, handle_tr);
2bcb3fbc 2241 set_except_vector(14, handle_msa_fpe);
1da177e4 2242
10cc3529
RB
2243 if (current_cpu_type() == CPU_R6000 ||
2244 current_cpu_type() == CPU_R6000A) {
1da177e4
LT
2245 /*
2246 * The R6000 is the only R-series CPU that features a machine
2247 * check exception (similar to the R4000 cache error) and
2248 * unaligned ldc1/sdc1 exception. The handlers have not been
70342287 2249 * written yet. Well, anyway there is no R6000 machine on the
1da177e4
LT
2250 * current list of targets for Linux/MIPS.
2251 * (Duh, crap, there is someone with a triple R6k machine)
2252 */
2253 //set_except_vector(14, handle_mc);
2254 //set_except_vector(15, handle_ndc);
2255 }
2256
e01402b1
RB
2257
2258 if (board_nmi_handler_setup)
2259 board_nmi_handler_setup();
2260
e50c0a8f
RB
2261 if (cpu_has_fpu && !cpu_has_nofpuex)
2262 set_except_vector(15, handle_fpe);
2263
75b5b5e0 2264 set_except_vector(16, handle_ftlb);
5890f70f
LY
2265
2266 if (cpu_has_rixiex) {
2267 set_except_vector(19, tlb_do_page_fault_0);
2268 set_except_vector(20, tlb_do_page_fault_0);
2269 }
2270
1db1af84 2271 set_except_vector(21, handle_msa);
e50c0a8f
RB
2272 set_except_vector(22, handle_mdmx);
2273
2274 if (cpu_has_mcheck)
2275 set_except_vector(24, handle_mcheck);
2276
340ee4b9
RB
2277 if (cpu_has_mipsmt)
2278 set_except_vector(25, handle_mt);
2279
acaec427 2280 set_except_vector(26, handle_dsp);
e50c0a8f 2281
fcbf1dfd
DD
2282 if (board_cache_error_setup)
2283 board_cache_error_setup();
2284
e50c0a8f
RB
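	/*
	 * R4000-class CPUs (cpu_has_4kex) take the general exception at
	 * offset 0x180; older parts use 0x080. The R4[04]00 VCE variant
	 * needs the larger 0x100-byte vector.
	 */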
2285 if (cpu_has_vce)
2286 /* Special exception: R4[04]00 uses also the divec space. */
2a0b24f5 2287 set_handler(0x180, &except_vec3_r4000, 0x100);
e50c0a8f 2288 else if (cpu_has_4kex)
2a0b24f5 2289 set_handler(0x180, &except_vec3_generic, 0x80);
e50c0a8f 2290 else
2a0b24f5 2291 set_handler(0x080, &except_vec3_generic, 0x80);
e50c0a8f 2292
e0cee3ee 2293 local_flush_icache_range(ebase, ebase + 0x400);
0510617b
TB
2294
2295 sort_extable(__start___dbe_table, __stop___dbe_table);
69f3a7de 2296
4483b159 2297 cu2_notifier(default_cu2_call, 0x80000000); /* Run last */
1da177e4 2298}
ae4ce454
JH
2299
2300static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
2301 void *v)
2302{
2303 switch (cmd) {
2304 case CPU_PM_ENTER_FAILED:
2305 case CPU_PM_EXIT:
2306 configure_status();
2307 configure_hwrena();
2308 configure_exception_vector();
2309
2310 /* Restore register with CPU number for TLB handlers */
2311 TLBMISS_HANDLER_RESTORE();
2312
2313 break;
2314 }
2315
2316 return NOTIFY_OK;
2317}
2318
2319static struct notifier_block trap_pm_notifier_block = {
2320 .notifier_call = trap_pm_notifier,
2321};
2322
2323static int __init trap_pm_init(void)
2324{
2325 return cpu_pm_register_notifier(&trap_pm_notifier_block);
2326}
2327arch_initcall(trap_pm_init);