/*
 * move die notifier handling to common code
 * (source: deliverable/linux.git — arch/ia64/kernel/traps.c)
 */
1 /*
2 * Architecture-specific trap handling.
3 *
4 * Copyright (C) 1998-2003 Hewlett-Packard Co
5 * David Mosberger-Tang <davidm@hpl.hp.com>
6 *
7 * 05/12/00 grao <goutham.rao@intel.com> : added isr in siginfo for SIGFPE
8 */
9
10 #include <linux/kernel.h>
11 #include <linux/init.h>
12 #include <linux/sched.h>
13 #include <linux/tty.h>
14 #include <linux/vt_kern.h> /* For unblank_screen() */
15 #include <linux/module.h> /* for EXPORT_SYMBOL */
16 #include <linux/hardirq.h>
17 #include <linux/kprobes.h>
18 #include <linux/delay.h> /* for ssleep() */
19 #include <linux/kdebug.h>
20
21 #include <asm/fpswa.h>
22 #include <asm/ia32.h>
23 #include <asm/intrinsics.h>
24 #include <asm/processor.h>
25 #include <asm/uaccess.h>
26
27 fpswa_interface_t *fpswa_interface;
28 EXPORT_SYMBOL(fpswa_interface);
29
30 void __init
31 trap_init (void)
32 {
33 if (ia64_boot_param->fpswa)
34 /* FPSWA fixup: make the interface pointer a kernel virtual address: */
35 fpswa_interface = __va(ia64_boot_param->fpswa);
36 }
37
38 void
39 die (const char *str, struct pt_regs *regs, long err)
40 {
41 static struct {
42 spinlock_t lock;
43 u32 lock_owner;
44 int lock_owner_depth;
45 } die = {
46 .lock = SPIN_LOCK_UNLOCKED,
47 .lock_owner = -1,
48 .lock_owner_depth = 0
49 };
50 static int die_counter;
51 int cpu = get_cpu();
52
53 if (die.lock_owner != cpu) {
54 console_verbose();
55 spin_lock_irq(&die.lock);
56 die.lock_owner = cpu;
57 die.lock_owner_depth = 0;
58 bust_spinlocks(1);
59 }
60 put_cpu();
61
62 if (++die.lock_owner_depth < 3) {
63 printk("%s[%d]: %s %ld [%d]\n",
64 current->comm, current->pid, str, err, ++die_counter);
65 (void) notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
66 show_regs(regs);
67 } else
68 printk(KERN_ERR "Recursive die() failure, output suppressed\n");
69
70 bust_spinlocks(0);
71 die.lock_owner = -1;
72 spin_unlock_irq(&die.lock);
73
74 if (panic_on_oops)
75 panic("Fatal exception");
76
77 do_exit(SIGSEGV);
78 }
79
/*
 * die() wrapper that only fires for faults taken in kernel mode;
 * user-mode faults fall through to normal signal delivery instead.
 */
void
die_if_kernel (char *str, struct pt_regs *regs, long err)
{
	if (user_mode(regs))
		return;
	die(str, regs, err);
}
86
/*
 * Handler for the "break" instruction fault.  Decodes the break
 * immediate into a (signal, si_code) pair and delivers the signal to
 * the current task.  Break 0 and the kprobes/debugger range also pass
 * through the die notifier chain so a debugger can swallow the event;
 * unexpected kernel-mode breaks end up in die_if_kernel().
 */
void
__kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
{
	siginfo_t siginfo;
	int sig, code;

	/* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these fields initialized: */
	siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
	siginfo.si_imm = break_num;
	siginfo.si_flags = 0;		/* clear __ISR_VALID */
	siginfo.si_isr = 0;

	switch (break_num) {
	      case 0: /* unknown error (used by GCC for __builtin_abort()) */
		if (notify_die(DIE_BREAK, "break 0", regs, break_num, TRAP_BRKPT, SIGTRAP)
			       	== NOTIFY_STOP)
			return;
		die_if_kernel("bugcheck!", regs, break_num);
		sig = SIGILL; code = ILL_ILLOPC;
		break;

	      case 1: /* integer divide by zero */
		sig = SIGFPE; code = FPE_INTDIV;
		break;

	      case 2: /* integer overflow */
		sig = SIGFPE; code = FPE_INTOVF;
		break;

	      case 3: /* range check/bounds check */
		sig = SIGFPE; code = FPE_FLTSUB;
		break;

	      case 4: /* null pointer dereference */
		sig = SIGSEGV; code = SEGV_MAPERR;
		break;

	      case 5: /* misaligned data */
		/*
		 * NOTE(review): BUS_ADRALN is a SIGBUS si_code but it is
		 * delivered here with SIGSEGV — long-standing behavior;
		 * confirm before changing.
		 */
		sig = SIGSEGV; code = BUS_ADRALN;
		break;

	      case 6: /* decimal overflow */
		sig = SIGFPE; code = __FPE_DECOVF;
		break;

	      case 7: /* decimal divide by zero */
		sig = SIGFPE; code = __FPE_DECDIV;
		break;

	      case 8: /* packed decimal error */
		sig = SIGFPE; code = __FPE_DECERR;
		break;

	      case 9: /* invalid ASCII digit */
		sig = SIGFPE; code = __FPE_INVASC;
		break;

	      case 10: /* invalid decimal digit */
		sig = SIGFPE; code = __FPE_INVDEC;
		break;

	      case 11: /* paragraph stack overflow */
		sig = SIGSEGV; code = __SEGV_PSTKOVF;
		break;

	      case 0x3f000 ... 0x3ffff: /* bundle-update in progress */
		sig = SIGILL; code = __ILL_BNDMOD;
		break;

	      default:
		/* anything outside the architected application range is fatal in kernel mode */
		if (break_num < 0x40000 || break_num > 0x100000)
			die_if_kernel("Bad break", regs, break_num);

		if (break_num < 0x80000) {
			sig = SIGILL; code = __ILL_BREAK;
		} else {
			/* debugger-reserved range: give the notifier chain first shot */
			if (notify_die(DIE_BREAK, "bad break", regs, break_num, TRAP_BRKPT, SIGTRAP)
					== NOTIFY_STOP)
				return;
			sig = SIGTRAP; code = TRAP_BRKPT;
		}
	}
	siginfo.si_signo = sig;
	siginfo.si_errno = 0;
	siginfo.si_code = code;
	force_sig_info(sig, &siginfo, current);
}
174
175 /*
176 * disabled_fph_fault() is called when a user-level process attempts to access f32..f127
177 * and it doesn't own the fp-high register partition. When this happens, we save the
178 * current fph partition in the task_struct of the fpu-owner (if necessary) and then load
179 * the fp-high partition of the current task (if necessary). Note that the kernel has
180 * access to fph by the time we get here, as the IVT's "Disabled FP-Register" handler takes
181 * care of clearing psr.dfh.
182 */
static inline void
disabled_fph_fault (struct pt_regs *regs)
{
	struct ia64_psr *psr = ia64_psr(regs);

	/* first, grant user-level access to fph partition: */
	psr->dfh = 0;

	/*
	 * Make sure that no other task gets in on this processor
	 * while we're claiming the FPU
	 */
	preempt_disable();
#ifndef CONFIG_SMP
	{
		struct task_struct *fpu_owner
			= (struct task_struct *)ia64_get_kr(IA64_KR_FPU_OWNER);

		/* already the owner: nothing to save or restore */
		if (ia64_is_local_fpu_owner(current)) {
			preempt_enable_no_resched();
			return;
		}

		/* save the previous owner's fph state before we take over */
		if (fpu_owner)
			ia64_flush_fph(fpu_owner);
	}
#endif /* !CONFIG_SMP */
	ia64_set_local_fpu_owner(current);
	if ((current->thread.flags & IA64_THREAD_FPH_VALID) != 0) {
		/* reload this task's previously saved fph partition */
		__ia64_load_fpu(current->thread.fph);
		psr->mfh = 0;
	} else {
		__ia64_init_fpu();
		/*
		 * Set mfh because the state in thread.fph does not match the state in
		 * the fph partition.
		 */
		psr->mfh = 1;
	}
	preempt_enable_no_resched();
}
224
225 static inline int
226 fp_emulate (int fp_fault, void *bundle, long *ipsr, long *fpsr, long *isr, long *pr, long *ifs,
227 struct pt_regs *regs)
228 {
229 fp_state_t fp_state;
230 fpswa_ret_t ret;
231
232 if (!fpswa_interface)
233 return -1;
234
235 memset(&fp_state, 0, sizeof(fp_state_t));
236
237 /*
238 * compute fp_state. only FP registers f6 - f11 are used by the
239 * kernel, so set those bits in the mask and set the low volatile
240 * pointer to point to these registers.
241 */
242 fp_state.bitmask_low64 = 0xfc0; /* bit6..bit11 */
243
244 fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
245 /*
246 * unsigned long (*EFI_FPSWA) (
247 * unsigned long trap_type,
248 * void *Bundle,
249 * unsigned long *pipsr,
250 * unsigned long *pfsr,
251 * unsigned long *pisr,
252 * unsigned long *ppreds,
253 * unsigned long *pifs,
254 * void *fp_state);
255 */
256 ret = (*fpswa_interface->fpswa)((unsigned long) fp_fault, bundle,
257 (unsigned long *) ipsr, (unsigned long *) fpsr,
258 (unsigned long *) isr, (unsigned long *) pr,
259 (unsigned long *) ifs, &fp_state);
260
261 return ret.status;
262 }
263
/*
 * Rate-limit state for the "floating-point assist fault" console
 * message: one instance per CPU plus one global instance shared by
 * all CPUs (kept on its own cacheline).
 */
struct fpu_swa_msg {
	unsigned long count;	/* messages emitted in the current window */
	unsigned long time;	/* jiffies at which the current window expires */
};
static DEFINE_PER_CPU(struct fpu_swa_msg, cpulast);
/*
 * NOTE(review): this DECLARE_PER_CPU duplicates the static definition
 * directly above — presumably a build/toolchain workaround; confirm
 * before removing.
 */
DECLARE_PER_CPU(struct fpu_swa_msg, cpulast);
static struct fpu_swa_msg last __cacheline_aligned;
271
272
273 /*
274 * Handle floating-point assist faults and traps.
275 */
/*
 * Handle a floating-point assist fault (fp_fault != 0) or trap
 * (fp_fault == 0): fetch the faulting bundle from user space, emit a
 * rate-limited console warning, hand the bundle to the FPSWA firmware
 * emulator, and translate any remaining exception into a SIGFPE.
 * Returns 0 on success, -1 if the bundle could not be read or FPSWA
 * is unavailable.
 */
static int
handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
{
	long exception, bundle[2];
	unsigned long fault_ip;
	struct siginfo siginfo;

	fault_ip = regs->cr_iip;
	/* for traps with slot 0, cr_iip already points past the faulting bundle: back up 16 bytes */
	if (!fp_fault && (ia64_psr(regs)->ri == 0))
		fault_ip -= 16;
	if (copy_from_user(bundle, (void __user *) fault_ip, sizeof(bundle)))
		return -1;

	if (!(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT))  {
		unsigned long count, current_jiffies = jiffies;
		struct fpu_swa_msg *cp = &__get_cpu_var(cpulast);

		/* per-CPU window expired: restart its message budget */
		if (unlikely(current_jiffies > cp->time))
			cp->count = 0;
		if (unlikely(cp->count < 5)) {
			cp->count++;
			cp->time = current_jiffies + 5 * HZ;

			/* minimize races by grabbing a copy of count BEFORE checking last.time. */
			count = last.count;
			barrier();

			/*
			 * Lower 4 bits are used as a count. Upper bits are a sequence
			 * number that is updated when count is reset. The cmpxchg will
			 * fail if seqno has changed. This minimizes multiple cpus
			 * resetting the count.
			 */
			if (current_jiffies > last.time)
				(void) cmpxchg_acq(&last.count, count, 16 + (count & ~15));

			/* use fetchadd to atomically update the count */
			if ((last.count & 15) < 5 && (ia64_fetchadd(1, &last.count, acq) & 15) < 5) {
				last.time = current_jiffies + 5 * HZ;
				printk(KERN_WARNING
					"%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n",
					current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri, isr);
			}
		}
	}

	exception = fp_emulate(fp_fault, bundle, &regs->cr_ipsr, &regs->ar_fpsr, &isr, &regs->pr,
			       &regs->cr_ifs, regs);
	if (fp_fault) {
		if (exception == 0) {
			/* emulation was successful */
			ia64_increment_ip(regs);
		} else if (exception == -1) {
			printk(KERN_ERR "handle_fpu_swa: fp_emulate() returned -1\n");
			return -1;
		} else {
			/* is next instruction a trap? */
			if (exception & 2) {
				ia64_increment_ip(regs);
			}
			siginfo.si_signo = SIGFPE;
			siginfo.si_errno = 0;
			siginfo.si_code = __SI_FAULT;	/* default code */
			siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
			/* isr bits here presumably encode the raised IEEE exception class — see FPSWA spec */
			if (isr & 0x11) {
				siginfo.si_code = FPE_FLTINV;
			} else if (isr & 0x22) {
				/* denormal operand gets the same si_code as underflow
				 * see arch/i386/kernel/traps.c:math_error() */
				siginfo.si_code = FPE_FLTUND;
			} else if (isr & 0x44) {
				siginfo.si_code = FPE_FLTDIV;
			}
			siginfo.si_isr = isr;
			siginfo.si_flags = __ISR_VALID;
			siginfo.si_imm = 0;
			force_sig_info(SIGFPE, &siginfo, current);
		}
	} else {
		if (exception == -1) {
			printk(KERN_ERR "handle_fpu_swa: fp_emulate() returned -1\n");
			return -1;
		} else if (exception != 0) {
			/* raise exception */
			siginfo.si_signo = SIGFPE;
			siginfo.si_errno = 0;
			siginfo.si_code = __SI_FAULT;	/* default code */
			siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
			if (isr & 0x880) {
				siginfo.si_code = FPE_FLTOVF;
			} else if (isr & 0x1100) {
				siginfo.si_code = FPE_FLTUND;
			} else if (isr & 0x2200) {
				siginfo.si_code = FPE_FLTRES;
			}
			siginfo.si_isr = isr;
			siginfo.si_flags = __ISR_VALID;
			siginfo.si_imm = 0;
			force_sig_info(SIGFPE, &siginfo, current);
		}
	}
	return 0;
}
379
/*
 * Aggregate return type for the illegal-operation path.  A fkt value
 * of (unsigned long) -1 marks "not handled" from the BRL emulator
 * (see ia64_illegal_op_fault); the semantics of the other fields are
 * defined by the consumer — presumably the low-level fault stub.
 * TODO(review): confirm against the assembly caller.
 */
struct illegal_op_return {
	unsigned long fkt, arg1, arg2, arg3;
};
383
384 struct illegal_op_return
385 ia64_illegal_op_fault (unsigned long ec, long arg1, long arg2, long arg3,
386 long arg4, long arg5, long arg6, long arg7,
387 struct pt_regs regs)
388 {
389 struct illegal_op_return rv;
390 struct siginfo si;
391 char buf[128];
392
393 #ifdef CONFIG_IA64_BRL_EMU
394 {
395 extern struct illegal_op_return ia64_emulate_brl (struct pt_regs *, unsigned long);
396
397 rv = ia64_emulate_brl(&regs, ec);
398 if (rv.fkt != (unsigned long) -1)
399 return rv;
400 }
401 #endif
402
403 sprintf(buf, "IA-64 Illegal operation fault");
404 die_if_kernel(buf, &regs, 0);
405
406 memset(&si, 0, sizeof(si));
407 si.si_signo = SIGILL;
408 si.si_code = ILL_ILLOPC;
409 si.si_addr = (void __user *) (regs.cr_iip + ia64_psr(&regs)->ri);
410 force_sig_info(SIGILL, &si, current);
411 rv.fkt = 0;
412 return rv;
413 }
414
/*
 * Common entry point for miscellaneous IA-64 faults and traps,
 * dispatched by IVT vector number.  Each case either fixes up the
 * state and returns, delivers an appropriate signal to a user task,
 * or falls through to die_if_kernel() + SIGILL for unexpected
 * kernel-mode faults (via the buf/error fall-through at the bottom).
 */
void __kprobes
ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
	    unsigned long iim, unsigned long itir, long arg5, long arg6,
	    long arg7, struct pt_regs regs)
{
	unsigned long code, error = isr, iip;
	struct siginfo siginfo;
	char buf[128];
	int result, sig;
	/* human-readable names for General Exception sub-codes (isr bits 4..7) */
	static const char *reason[] = {
		"IA-64 Illegal Operation fault",
		"IA-64 Privileged Operation fault",
		"IA-64 Privileged Register fault",
		"IA-64 Reserved Register/Field fault",
		"Disabled Instruction Set Transition fault",
		"Unknown fault 5", "Unknown fault 6", "Unknown fault 7", "Illegal Hazard fault",
		"Unknown fault 9", "Unknown fault 10", "Unknown fault 11", "Unknown fault 12",
		"Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
	};

	if ((isr & IA64_ISR_NA) && ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
		/*
		 * This fault was due to lfetch.fault, set "ed" bit in the psr to cancel
		 * the lfetch.
		 */
		ia64_psr(&regs)->ed = 1;
		return;
	}

	/* faulting instruction address, including the slot number within the bundle */
	iip = regs.cr_iip + ia64_psr(&regs)->ri;

	switch (vector) {
	      case 24: /* General Exception */
		code = (isr >> 4) & 0xf;
		sprintf(buf, "General Exception: %s%s", reason[code],
			(code == 3) ? ((isr & (1UL << 37))
				       ? " (RSE access)" : " (data access)") : "");
		if (code == 8) {
			/* Illegal Hazard fault: optionally log, then ignore */
# ifdef CONFIG_IA64_PRINT_HAZARDS
			printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
			       current->comm, current->pid,
			       regs.cr_iip + ia64_psr(&regs)->ri, regs.pr);
# endif
			return;
		}
		break;

	      case 25: /* Disabled FP-Register */
		if (isr & 2) {
			/* fp-high (f32..f127) access: lazily switch fph ownership */
			disabled_fph_fault(&regs);
			return;
		}
		sprintf(buf, "Disabled FPL fault---not supposed to happen!");
		break;

	      case 26: /* NaT Consumption */
		if (user_mode(&regs)) {
			void __user *addr;

			if (((isr >> 4) & 0xf) == 2) {
				/* NaT page consumption */
				sig = SIGSEGV;
				code = SEGV_ACCERR;
				addr = (void __user *) ifa;
			} else {
				/* register NaT consumption */
				sig = SIGILL;
				code = ILL_ILLOPN;
				addr = (void __user *) (regs.cr_iip
							+ ia64_psr(&regs)->ri);
			}
			siginfo.si_signo = sig;
			siginfo.si_code = code;
			siginfo.si_errno = 0;
			siginfo.si_addr = addr;
			siginfo.si_imm = vector;
			siginfo.si_flags = __ISR_VALID;
			siginfo.si_isr = isr;
			force_sig_info(sig, &siginfo, current);
			return;
		} else if (ia64_done_with_exception(&regs))
			/* kernel-mode NaT with a registered exception fixup: handled */
			return;
		sprintf(buf, "NaT consumption");
		break;

	      case 31: /* Unsupported Data Reference */
		if (user_mode(&regs)) {
			siginfo.si_signo = SIGILL;
			siginfo.si_code = ILL_ILLOPN;
			siginfo.si_errno = 0;
			siginfo.si_addr = (void __user *) iip;
			siginfo.si_imm = vector;
			siginfo.si_flags = __ISR_VALID;
			siginfo.si_isr = isr;
			force_sig_info(SIGILL, &siginfo, current);
			return;
		}
		sprintf(buf, "Unsupported data reference");
		break;

	      case 29: /* Debug */
	      case 35: /* Taken Branch Trap */
	      case 36: /* Single Step Trap */
		if (fsys_mode(current, &regs)) {
			extern char __kernel_syscall_via_break[];
			/*
			 * Got a trap in fsys-mode: Taken Branch Trap
			 * and Single Step trap need special handling;
			 * Debug trap is ignored (we disable it here
			 * and re-enable it in the lower-privilege trap).
			 */
			if (unlikely(vector == 29)) {
				set_thread_flag(TIF_DB_DISABLED);
				ia64_psr(&regs)->db = 0;
				ia64_psr(&regs)->lp = 1;
				return;
			}
			/* re-do the system call via break 0x100000: */
			regs.cr_iip = (unsigned long) __kernel_syscall_via_break;
			ia64_psr(&regs)->ri = 0;
			ia64_psr(&regs)->cpl = 3;
			return;
		}
		/* map the vector to a si_code (and fix up the reported address) */
		switch (vector) {
		      case 29:
			siginfo.si_code = TRAP_HWBKPT;
#ifdef CONFIG_ITANIUM
			/*
			 * Erratum 10 (IFA may contain incorrect address) now has
			 * "NoFix" status.  There are no plans for fixing this.
			 */
			if (ia64_psr(&regs)->is == 0)
			  ifa = regs.cr_iip;
#endif
			break;
		      case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break;
		      case 36: siginfo.si_code = TRAP_TRACE; ifa = 0; break;
		}
		if (notify_die(DIE_FAULT, "ia64_fault", &regs, vector, siginfo.si_code, SIGTRAP)
			       	== NOTIFY_STOP)
			return;
		siginfo.si_signo = SIGTRAP;
		siginfo.si_errno = 0;
		siginfo.si_addr = (void __user *) ifa;
		siginfo.si_imm = 0;
		siginfo.si_flags = __ISR_VALID;
		siginfo.si_isr = isr;
		force_sig_info(SIGTRAP, &siginfo, current);
		return;

	      case 32: /* fp fault */
	      case 33: /* fp trap */
		result = handle_fpu_swa((vector == 32) ? 1 : 0, &regs, isr);
		if ((result < 0) || (current->thread.flags & IA64_THREAD_FPEMU_SIGFPE)) {
			siginfo.si_signo = SIGFPE;
			siginfo.si_errno = 0;
			siginfo.si_code = FPE_FLTINV;
			siginfo.si_addr = (void __user *) iip;
			siginfo.si_flags = __ISR_VALID;
			siginfo.si_isr = isr;
			siginfo.si_imm = 0;
			force_sig_info(SIGFPE, &siginfo, current);
		}
		return;

	      case 34:
		if (isr & 0x2) {
			/* Lower-Privilege Transfer Trap */

			/* If we disabled debug traps during an fsyscall,
			 * re-enable them here.
			 */
			if (test_thread_flag(TIF_DB_DISABLED)) {
				clear_thread_flag(TIF_DB_DISABLED);
				ia64_psr(&regs)->db = 1;
			}

			/*
			 * Just clear PSR.lp and then return immediately:
			 * all the interesting work (e.g., signal delivery)
			 * is done in the kernel exit path.
			 */
			ia64_psr(&regs)->lp = 0;
			return;
		} else {
			/* Unimplemented Instr. Address Trap */
			if (user_mode(&regs)) {
				siginfo.si_signo = SIGILL;
				siginfo.si_code = ILL_BADIADDR;
				siginfo.si_errno = 0;
				siginfo.si_flags = 0;
				siginfo.si_isr = 0;
				siginfo.si_imm = 0;
				siginfo.si_addr = (void __user *) iip;
				force_sig_info(SIGILL, &siginfo, current);
				return;
			}
			sprintf(buf, "Unimplemented Instruction Address fault");
		}
		break;

	      case 45:
		/* IA-32 exception: let the compat layer handle it if present */
#ifdef CONFIG_IA32_SUPPORT
		if (ia32_exception(&regs, isr) == 0)
			return;
#endif
		printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n");
		printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n",
		       iip, ifa, isr);
		force_sig(SIGSEGV, current);
		break;

	      case 46:
		/* IA-32 intercept: let the compat layer handle it if present */
#ifdef CONFIG_IA32_SUPPORT
		if (ia32_intercept(&regs, isr) == 0)
			return;
#endif
		printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n");
		printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n",
		       iip, ifa, isr, iim);
		force_sig(SIGSEGV, current);
		return;

	      case 47:
		sprintf(buf, "IA-32 Interruption Fault (int 0x%lx)", isr >> 16);
		break;

	      default:
		sprintf(buf, "Fault %lu", vector);
		break;
	}
	/* fall-through: fatal in kernel mode, SIGILL for user tasks */
	die_if_kernel(buf, &regs, error);
	force_sig(SIGILL, current);
}
/* (git-viewer scrape footer) This page took 0.043724 seconds and 5 git commands to generate. */