/*
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 *  Modified by Cort Dougan (cort@cs.nmt.edu)
 *  and Paul Mackerras (paulus@samba.org)
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>

#include <asm/kdebug.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#ifdef CONFIG_PPC32
#include <asm/reg.h>
#endif
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#endif
#include <asm/kexec.h>

#ifdef CONFIG_PPC64	/* XXX */
#define _IO_BASE	pci_io_base
#endif

#ifdef CONFIG_DEBUGGER
int (*__debugger)(struct pt_regs *regs);
int (*__debugger_ipi)(struct pt_regs *regs);
int (*__debugger_bpt)(struct pt_regs *regs);
int (*__debugger_sstep)(struct pt_regs *regs);
int (*__debugger_iabr_match)(struct pt_regs *regs);
int (*__debugger_dabr_match)(struct pt_regs *regs);
int (*__debugger_fault_handler)(struct pt_regs *regs);

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_dabr_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

ATOMIC_NOTIFIER_HEAD(powerpc_die_chain);

int register_die_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&powerpc_die_chain, nb);
}
EXPORT_SYMBOL(register_die_notifier);

int unregister_die_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&powerpc_die_chain, nb);
}
EXPORT_SYMBOL(unregister_die_notifier);

/*
 * Trap & Exception support
 */

static DEFINE_SPINLOCK(die_lock);

int die(const char *str, struct pt_regs *regs, long err)
{
	static int die_counter;

	if (debugger(regs))
		return 1;

	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
#ifdef CONFIG_PMAC_BACKLIGHT
	mutex_lock(&pmac_backlight_mutex);
	if (machine_is(powermac) && pmac_backlight) {
		struct backlight_properties *props;

		down(&pmac_backlight->sem);
		props = pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = FB_BLANK_UNBLANK;
		props->update_status(pmac_backlight);
		up(&pmac_backlight->sem);
	}
	mutex_unlock(&pmac_backlight_mutex);
#endif
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP NR_CPUS=%d ", NR_CPUS);
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC ");
#endif
#ifdef CONFIG_NUMA
	printk("NUMA ");
#endif
	printk("%s\n", ppc_md.name ? ppc_md.name : "");

	print_modules();
	show_regs(regs);
	bust_spinlocks(0);
	spin_unlock_irq(&die_lock);

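	/*
	 * Enter the kdump path if this oops should trigger a crash dump,
	 * or if this CPU was brought in via soft-reset (it is then marked
	 * in cpus_in_sr by system_reset_exception() and picked up by
	 * crash_kexec_secondary() below).
	 */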
	if (kexec_should_crash(current) ||
		kexec_sr_activated(smp_processor_id()))
		crash_kexec(regs);
	crash_kexec_secondary(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	do_exit(err);

	return 0;
}

void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	siginfo_t info;

	if (!user_mode(regs)) {
		if (die("Exception in kernel mode", regs, signr))
			return;
	}

	memset(&info, 0, sizeof(info));
	info.si_signo = signr;
	info.si_code = code;
	info.si_addr = (void __user *) addr;
	force_sig_info(signr, &info, current);

	/*
	 * Init gets no signals that it doesn't have a handler for.
	 * That's all very well, but if it has caused a synchronous
	 * exception and we ignore the resulting signal, it will just
	 * generate the same exception over and over again and we get
	 * nowhere.  Better to kill it and let the kernel panic.
	 */
	if (current->pid == 1) {
		__sighandler_t handler;

		spin_lock_irq(&current->sighand->siglock);
		handler = current->sighand->action[signr-1].sa.sa_handler;
		spin_unlock_irq(&current->sighand->siglock);
		if (handler == SIG_DFL) {
			/* init has generated a synchronous exception
			   and it doesn't have a handler for the signal */
			printk(KERN_CRIT "init has generated signal %d "
			       "but has no handler for it\n", signr);
			do_exit(signr);
		}
	}
}

#ifdef CONFIG_PPC64
void system_reset_exception(struct pt_regs *regs)
{
	/* See if any machine dependent calls */
	if (ppc_md.system_reset_exception) {
		if (ppc_md.system_reset_exception(regs))
			return;
	}

#ifdef CONFIG_KEXEC
	cpu_set(smp_processor_id(), cpus_in_sr);
#endif

	die("System Reset", regs, SIGABRT);

	/*
	 * Some CPUs when released from the debugger will execute this path.
	 * These CPUs entered the debugger via a soft-reset. If the CPU was
	 * hung before entering the debugger it will return to the hung
	 * state when exiting this function.  This causes a problem in
	 * kdump since the hung CPU(s) will not respond to the IPI sent
	 * from kdump. To prevent the problem we call crash_kexec_secondary()
	 * here. If a kdump had not been initiated or we exit the debugger
	 * with the "exit and recover" command (x) crash_kexec_secondary()
	 * will return after 5ms and the CPU returns to its previous state.
	 */
	crash_kexec_secondary(regs);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable System Reset");

	/* What should we do here? We could issue a shutdown or hard reset. */
}
#endif

/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == 0x60000000)		/* nop */
			nip -= 2;
		else if (*nip == 0x4c00012c)	/* isync */
			--nip;
		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
			/* sync or twi */
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100)? "OUT to": "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs->msr |= MSR_RI;
			regs->nip = entry->fixup;
			return 1;
		}
	}
#endif /* CONFIG_PPC_PMAC && CONFIG_PPC32 */
	return 0;
}

#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->dsisr)
#ifndef CONFIG_FSL_BOOKE
#define get_mc_reason(regs)	((regs)->dsisr)
#else
#define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
#endif
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR

/* single-step stuff */
#define single_stepping(regs)	(current->thread.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.dbcr0 &= ~DBCR0_IC)

#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define get_mc_reason(regs)	((regs)->msr)
#define REASON_FP		0x100000
#define REASON_ILLEGAL		0x80000
#define REASON_PRIVILEGED	0x40000
#define REASON_TRAP		0x20000

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#endif

/*
 * This is a "fall-back" implementation for configurations
 * which don't provide platform-specific machine check info.
 */
void __attribute__ ((weak))
platform_machine_check(struct pt_regs *regs)
{
}

void machine_check_exception(struct pt_regs *regs)
{
	int recover = 0;
	unsigned long reason = get_mc_reason(regs);

	/* See if any machine dependent calls */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);

	if (recover)
		return;

	if (user_mode(regs)) {
		regs->msr |= MSR_RI;
		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
		return;
	}

#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
	/* the qspan pci read routines can cause machine checks -- Cort */
	bad_page_fault(regs, regs->dar, SIGBUS);
	return;
#endif

	if (debugger_fault_handler(regs)) {
		regs->msr |= MSR_RI;
		return;
	}

	if (check_io_access(regs))
		return;

#if defined(CONFIG_4xx) && !defined(CONFIG_440A)
	if (reason & ESR_IMCP) {
		printk("Instruction");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	} else
		printk("Data");
	printk(" machine check in kernel mode.\n");
#elif defined(CONFIG_440A)
	printk("Machine check in kernel mode.\n");
	if (reason & ESR_IMCP){
		printk("Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	}
	else {
		u32 mcsr = mfspr(SPRN_MCSR);
		if (mcsr & MCSR_IB)
			printk("Instruction Read PLB Error\n");
		if (mcsr & MCSR_DRB)
			printk("Data Read PLB Error\n");
		if (mcsr & MCSR_DWB)
			printk("Data Write PLB Error\n");
		if (mcsr & MCSR_TLBP)
			printk("TLB Parity Error\n");
		if (mcsr & MCSR_ICP){
			flush_instruction_cache();
			printk("I-Cache Parity Error\n");
		}
		if (mcsr & MCSR_DCSP)
			printk("D-Cache Search Parity Error\n");
		if (mcsr & MCSR_DCFP)
			printk("D-Cache Flush Parity Error\n");
		if (mcsr & MCSR_IMPE)
			printk("Machine Check exception is imprecise\n");

		/* Clear MCSR */
		mtspr(SPRN_MCSR, mcsr);
	}
#elif defined (CONFIG_E500)
	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_ICPERR)
		printk("Instruction Cache Parity Error\n");
	if (reason & MCSR_DCP_PERR)
		printk("Data Cache Push Parity Error\n");
	if (reason & MCSR_DCPERR)
		printk("Data Cache Parity Error\n");
	if (reason & MCSR_GL_CI)
		printk("Guarded Load or Cache-Inhibited stwcx.\n");
	if (reason & MCSR_BUS_IAERR)
		printk("Bus - Instruction Address Error\n");
	if (reason & MCSR_BUS_RAERR)
		printk("Bus - Read Address Error\n");
	if (reason & MCSR_BUS_WAERR)
		printk("Bus - Write Address Error\n");
	if (reason & MCSR_BUS_IBERR)
		printk("Bus - Instruction Data Error\n");
	if (reason & MCSR_BUS_RBERR)
		printk("Bus - Read Data Bus Error\n");
	if (reason & MCSR_BUS_WBERR)
416 printk("Bus - Read Data Bus Error\n");
	if (reason & MCSR_BUS_IPERR)
		printk("Bus - Instruction Parity Error\n");
	if (reason & MCSR_BUS_RPERR)
		printk("Bus - Read Parity Error\n");
#elif defined (CONFIG_E200)
	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_CP_PERR)
		printk("Cache Push Parity Error\n");
	if (reason & MCSR_CPERR)
		printk("Cache Parity Error\n");
	if (reason & MCSR_EXCP_ERR)
		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
	if (reason & MCSR_BUS_IRERR)
		printk("Bus - Read Bus Error on instruction fetch\n");
	if (reason & MCSR_BUS_DRERR)
		printk("Bus - Read Bus Error on data load\n");
	if (reason & MCSR_BUS_WRERR)
		printk("Bus - Write Bus Error on buffered store or cache line push\n");
#else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */
	printk("Machine check in kernel mode.\n");
	printk("Caused by (from SRR1=%lx): ", reason);
	switch (reason & 0x601F0000) {
	case 0x80000:
		printk("Machine check signal\n");
		break;
	case 0:		/* for 601 */
	case 0x40000:
	case 0x140000:	/* 7450 MSS error and TEA */
		printk("Transfer error ack signal\n");
		break;
	case 0x20000:
		printk("Data parity error signal\n");
		break;
	case 0x10000:
		printk("Address parity error signal\n");
		break;
	case 0x20000000:
		printk("L1 Data Cache error\n");
		break;
	case 0x40000000:
		printk("L1 Instruction Cache error\n");
		break;
	case 0x00100000:
		printk("L2 data cache parity error\n");
		break;
	default:
		printk("Unknown values in msr\n");
	}
#endif /* CONFIG_4xx */

	/*
	 * Optional platform-provided routine to print out
	 * additional info, e.g. bus error registers.
	 */
	platform_machine_check(regs);

	if (debugger_fault_handler(regs))
		return;
	die("Machine check", regs, SIGBUS);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable Machine check");
}

void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}

void unknown_exception(struct pt_regs *regs)
{
	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, 0, 0);
}

void instruction_breakpoint_exception(struct pt_regs *regs)
{
	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
			5, SIGTRAP) == NOTIFY_STOP)
		return;
	if (debugger_iabr_match(regs))
		return;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
}

void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, 0, 0);
}

void __kprobes single_step_exception(struct pt_regs *regs)
{
	regs->msr &= ~(MSR_SE | MSR_BE);	/* Turn off 'trace' bits */

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			5, SIGTRAP) == NOTIFY_STOP)
		return;
	if (debugger_sstep(regs))
		return;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
}

/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception.  This was pointed out
 * by Kumar Gala.  -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs)) {
		clear_single_step(regs);
		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
	}
}

static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;
	unsigned long fpscr;

	flush_fp_to_thread(current);

	fpscr = current->thread.fpscr.val;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		code = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		code = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		code = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		code = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		code = FPE_FLTRES;

	_exception(SIGFPE, regs, code, regs->nip);
}

/*
 * Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 */
#define INST_MFSPR_PVR		0x7c1f42a6
#define INST_MFSPR_PVR_MASK	0xfc1fffff

#define INST_DCBA		0x7c0005ec
#define INST_DCBA_MASK		0xfc0007fe

#define INST_MCRXR		0x7c000400
#define INST_MCRXR_MASK		0xfc0007fe

#define INST_STRING		0x7c00042a
#define INST_STRING_MASK	0xfc0007fe
#define INST_STRING_GEN_MASK	0xfc00067e
#define INST_LSWI		0x7c0004aa
#define INST_LSWX		0x7c00042a
#define INST_STSWI		0x7c0005aa
#define INST_STSWX		0x7c00052a

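/*
 * Emulate the load/store string instructions (lswi/lswx/stswi/stswx),
 * which move an arbitrary number of bytes between memory and a run of
 * GPRs starting at rT, packing four bytes per register most-significant
 * byte first and wrapping from r31 back to r0.
 */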
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & INST_STRING_MASK) == INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & INST_STRING_MASK) {
	case INST_LSWX:
	case INST_STSWX:
		EA += NB_RB;
		num_bytes = regs->xer & 0x7f;
		break;
	case INST_LSWI:
	case INST_STSWI:
		num_bytes = (NB_RB == 0) ? 32 : NB_RB;
		break;
	default:
		return -EINVAL;
	}

	while (num_bytes != 0)
	{
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		switch ((instword & INST_STRING_MASK)) {
		case INST_LSWX:
		case INST_LSWI:
			if (get_user(val, (u8 __user *)EA))
				return -EFAULT;
			/* first time updating this reg,
			 * zero it out */
			if (pos == 0)
				regs->gpr[rT] = 0;
			regs->gpr[rT] |= val << shift;
			break;
		case INST_STSWI:
		case INST_STSWX:
			val = regs->gpr[rT] >> shift;
			if (put_user(val, (u8 __user *)EA))
				return -EFAULT;
			break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}

static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs) || (regs->msr & MSR_LE))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op. */
	if ((instword & INST_DCBA_MASK) == INST_DCBA)
		return 0;

	/* Emulate the mcrxr insn. */
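	/* crfD is encoded in the top three bits of the rD slot, so
	 * (instword >> 21) & 0x1c yields 4*crfD; the mask below then
	 * selects the destination CR field that receives XER[0:3]. */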
	if ((instword & INST_MCRXR_MASK) == INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & INST_STRING_GEN_MASK) == INST_STRING)
		return emulate_string_inst(regs, instword);

	return -EINVAL;
}

/*
 * Look through the list of trap instructions that are used for BUG(),
 * BUG_ON() and WARN_ON() and see if we hit one.  At this point we know
 * that the exception was caused by a trap instruction of some kind.
 * Returns 1 if we should continue (i.e. it was a WARN_ON) or 0
 * otherwise.
 */
extern struct bug_entry __start___bug_table[], __stop___bug_table[];

#ifndef CONFIG_MODULES
#define module_find_bug(x)	NULL
#endif

struct bug_entry *find_bug(unsigned long bugaddr)
{
	struct bug_entry *bug;

	for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
		if (bugaddr == bug->bug_addr)
			return bug;
	return module_find_bug(bugaddr);
}

static int check_bug_trap(struct pt_regs *regs)
{
	struct bug_entry *bug;
	unsigned long addr;

	if (regs->msr & MSR_PR)
		return 0;	/* not in kernel */
	addr = regs->nip;	/* address of trap instruction */
	if (addr < PAGE_OFFSET)
		return 0;
	bug = find_bug(regs->nip);
	if (bug == NULL)
		return 0;
	if (bug->line & BUG_WARNING_TRAP) {
		/* this is a WARN_ON rather than BUG/BUG_ON */
		printk(KERN_ERR "Badness in %s at %s:%ld\n",
		       bug->function, bug->file,
		       bug->line & ~BUG_WARNING_TRAP);
		dump_stack();
		return 1;
	}
	printk(KERN_CRIT "kernel BUG in %s at %s:%ld!\n",
	       bug->function, bug->file, bug->line);

	return 0;
}

void __kprobes program_check_exception(struct pt_regs *regs)
{
	unsigned int reason = get_reason(regs);
	extern int do_mathemu(struct pt_regs *regs);

#ifdef CONFIG_MATH_EMULATION
	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0.  In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc. -dgibson 31/Mar/2003 */
	if (!(reason & REASON_TRAP) && do_mathemu(regs) == 0) {
		emulate_single_step(regs);
		return;
	}
#endif /* CONFIG_MATH_EMULATION */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		return;
	}
	if (reason & REASON_TRAP) {
		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			return;
		if (debugger_bpt(regs))
			return;
		if (check_bug_trap(regs)) {
			regs->nip += 4;
			return;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		return;
	}

	local_irq_enable();

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			return;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			return;
		}
	}

	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}

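/*
 * alignment_exception() relies on fix_alignment() returning 1 once the
 * access has been emulated and -EFAULT when the operand address is
 * inaccessible; anything else is treated as an unfixable alignment
 * fault and raises SIGBUS.
 */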
void alignment_exception(struct pt_regs *regs)
{
	int fixed = 0;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		emulate_single_step(regs);
		return;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		if (user_mode(regs))
			_exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar);
		else
			/* Search exception table */
			bad_page_fault(regs, regs->dar, SIGSEGV);
		return;
	}
	_exception(SIGBUS, regs, BUS_ADRALN, regs->dar);
}

void StackOverflow(struct pt_regs *regs)
{
	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
	       current, regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}

void nonrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
	       regs->nip, regs->msr);
	debugger(regs);
	die("nonrecoverable exception", regs, SIGKILL);
}

void trace_syscall(struct pt_regs *regs)
{
	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n",
	       current, current->pid, regs->nip, regs->link, regs->gpr[0],
	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
}

void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
	       "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
}

void altivec_unavailable_exception(struct pt_regs *regs)
{
#if !defined(CONFIG_ALTIVEC)
	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}
#endif
	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
	       "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
}

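/*
 * Performance monitor interrupts are simply handed to perf_irq, the
 * handler hook installed by the PMC support code (see <asm/pmc.h>).
 */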
void performance_monitor_exception(struct pt_regs *regs)
{
	perf_irq(regs);
}

#ifdef CONFIG_8xx
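/*
 * The 8xx core has no FPU, so floating-point instructions trap here and
 * are either handled by the full math-emu code (CONFIG_MATH_EMULATION)
 * or by the minimal Soft_emulate_8xx() helper.
 */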
void SoftwareEmulation(struct pt_regs *regs)
{
	extern int do_mathemu(struct pt_regs *);
	extern int Soft_emulate_8xx(struct pt_regs *);
	int errcode;

	CHECK_FULL_REGS(regs);

	if (!user_mode(regs)) {
		debugger(regs);
		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
	}

#ifdef CONFIG_MATH_EMULATION
	errcode = do_mathemu(regs);
#else
	errcode = Soft_emulate_8xx(regs);
#endif
	if (errcode) {
		if (errcode > 0)
			_exception(SIGFPE, regs, 0, 0);
		else if (errcode == -EFAULT)
			_exception(SIGSEGV, regs, 0, 0);
		else
			_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
	} else
		emulate_single_step(regs);
}
#endif /* CONFIG_8xx */

#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)

void DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	if (debug_status & DBSR_IC) {	/* instruction completion */
		regs->msr &= ~MSR_DE;
		if (user_mode(regs)) {
			current->thread.dbcr0 &= ~DBCR0_IC;
		} else {
			/* Disable instruction completion */
			mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
			/* Clear the instruction completion event */
			mtspr(SPRN_DBSR, DBSR_IC);
			if (debugger_sstep(regs))
				return;
		}
		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
	}
}
#endif /* CONFIG_40x || CONFIG_BOOKE */

#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */

#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		if (printk_ratelimit())
			printk(KERN_ERR "Unrecognized altivec instruction "
			       "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	/* We treat cache locking instructions from the user
	 * as priv ops, in the future we could try to do
	 * something smarter
	 */
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	return;
}
#endif /* CONFIG_FSL_BOOKE */

#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
	unsigned long spefscr;
	int fpexc_mode;
	int code = 0;

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	/* Hardware does not necessarily set sticky
	 * underflow/overflow/invalid flags */
	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
		code = FPE_FLTOVF;
		spefscr |= SPEFSCR_FOVFS;
	}
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
		code = FPE_FLTUND;
		spefscr |= SPEFSCR_FUNFS;
	}
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
		code = FPE_FLTINV;
		spefscr |= SPEFSCR_FINVS;
	}
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;

	current->thread.spefscr = spefscr;

	_exception(SIGFPE, regs, code, regs->nip);
	return;
}
#endif

/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
	       regs->trap, regs->nip);
	die("Unrecoverable exception", regs, SIGABRT);
}

#ifdef CONFIG_BOOKE_WDT
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
	return;
}

void WatchdogException(struct pt_regs *regs)
{
	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif

/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}

void __init trap_init(void)
{
}