/*
 *  linux/arch/x86-64/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/nmi.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/unwind.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/kdebug.h>
#include <asm/processor.h>
#include <asm/unwind.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void reserved(void);
asmlinkage void alignment_check(void);
asmlinkage void machine_check(void);
asmlinkage void spurious_interrupt_bug(void);
ATOMIC_NOTIFIER_HEAD(die_chain);
EXPORT_SYMBOL(die_chain);

int register_die_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&die_chain, nb);
}
EXPORT_SYMBOL(register_die_notifier); /* used modular by kdb */

int unregister_die_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&die_chain, nb);
}
EXPORT_SYMBOL(unregister_die_notifier); /* used modular by kdb */
static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->eflags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	preempt_disable();
	if (regs->eflags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->eflags & X86_EFLAGS_IF)
		local_irq_disable();
	/* Make sure to not schedule here because we could be running
	   on an exception stack. */
	preempt_enable_no_resched();
}
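/*
 * Illustrative note, not part of the original file: the preempt_conditional_*
 * helpers above are meant to bracket the body of a handler that runs on an
 * IST stack, roughly
 *
 *	preempt_conditional_sti(regs);		/- preempt off, maybe sti -/
 *	do_trap(nr, sig, "name", regs, error_code, NULL);
 *	preempt_conditional_cli(regs);		/- maybe cli, preempt on -/
 *
 * which is the pattern do_stack_segment(), do_int3() and do_debug() below
 * follow.
 */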
static int kstack_depth_to_print = 12;
static int call_trace = 1;
#ifdef CONFIG_KALLSYMS
# include <linux/kallsyms.h>
void printk_address(unsigned long address)
{
	unsigned long offset = 0, symsize;
	const char *symname;
	char *modname;
	char *delim = ":";
	char namebuf[128];

	symname = kallsyms_lookup(address, &symsize, &offset,
					&modname, namebuf);
	if (!symname) {
		printk(" [<%016lx>]\n", address);
		return;
	}
	if (!modname)
		modname = delim = "";
	printk(" [<%016lx>] %s%s%s%s+0x%lx/0x%lx\n",
		address, delim, modname, delim, symname, offset, symsize);
}
#else
void printk_address(unsigned long address)
{
	printk(" [<%016lx>]\n", address);
}
#endif
static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
					unsigned *usedp, const char **idp)
{
	static char ids[][8] = {
		[DEBUG_STACK - 1] = "#DB",
		[NMI_STACK - 1] = "NMI",
		[DOUBLEFAULT_STACK - 1] = "#DF",
		[STACKFAULT_STACK - 1] = "#SS",
		[MCE_STACK - 1] = "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		[N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
#endif
	};
	unsigned k;

	/*
	 * Iterate over all exception stacks, and figure out whether
	 * 'stack' is in one of them:
	 */
	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
		unsigned long end;

		/*
		 * set 'end' to the end of the exception stack.
		 */
		switch (k + 1) {
		/*
		 * TODO: this block is not needed i think, because
		 *       setup64.c:cpu_init() sets up t->ist[DEBUG_STACK]
		 *       properly too.
		 */
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		case DEBUG_STACK:
			end = cpu_pda(cpu)->debugstack + DEBUG_STKSZ;
			break;
#endif
		default:
			end = per_cpu(init_tss, cpu).ist[k];
			break;
		}
		/*
		 * Is 'stack' above this exception frame's end?
		 * If yes then skip to the next frame.
		 */
		if (stack >= end)
			continue;
		/*
		 * Is 'stack' above this exception frame's start address?
		 * If yes then we found the right frame.
		 */
		if (stack >= end - EXCEPTION_STKSZ) {
			/*
			 * Make sure we only iterate through an exception
			 * stack once. If it comes up for the second time
			 * then there's something wrong going on - just
			 * break out and return NULL:
			 */
			if (*usedp & (1U << k))
				break;
			*usedp |= 1U << k;
			*idp = ids[k];
			return (unsigned long *)end;
		}
		/*
		 * If this is a debug stack, and if it has a larger size than
		 * the usual exception stacks, then 'stack' might still
		 * be within the lower portion of the debug stack:
		 */
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
			unsigned j = N_EXCEPTION_STACKS - 1;

			/*
			 * Black magic. A large debug stack is composed of
			 * multiple exception stack entries, which we
			 * iterate through now. Don't look:
			 */
			do {
				++j;
				end -= EXCEPTION_STKSZ;
				ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
			} while (stack < end - EXCEPTION_STKSZ);
			if (*usedp & (1U << j))
				break;
			*usedp |= 1U << j;
			*idp = ids[j];
			return (unsigned long *)end;
		}
#endif
	}
	return NULL;
}
static int show_trace_unwind(struct unwind_frame_info *info, void *context)
{
	int n = 0;

	while (unwind(info) == 0 && UNW_PC(info)) {
		n++;
		printk_address(UNW_PC(info));
		if (arch_unw_user_mode(info))
			break;
	}
	return n;
}
/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */
void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * stack)
{
	const unsigned cpu = safe_smp_processor_id();
	unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
	unsigned used = 0;

	printk("\nCall Trace:\n");

	if (!tsk)
		tsk = current;

	if (call_trace >= 0) {
		int unw_ret = 0;
		struct unwind_frame_info info;

		if (regs) {
			if (unwind_init_frame_info(&info, tsk, regs) == 0)
				unw_ret = show_trace_unwind(&info, NULL);
		} else if (tsk == current)
			unw_ret = unwind_init_running(&info, show_trace_unwind, NULL);
		else {
			if (unwind_init_blocked(&info, tsk) == 0)
				unw_ret = show_trace_unwind(&info, NULL);
		}
		if (unw_ret > 0 && !arch_unw_user_mode(&info)) {
#ifdef CONFIG_STACK_UNWIND
			unsigned long rip = info.regs.rip;
			print_symbol("DWARF2 unwinder stuck at %s\n", rip);
			if (call_trace == 1) {
				printk("Leftover inexact backtrace:\n");
				stack = (unsigned long *)info.regs.rsp;
			} else if (call_trace > 1)
				return;
			else
				printk("Full inexact backtrace again:\n");
#else
			printk("Inexact backtrace:\n");
#endif
		}
	}
	/*
	 * Print function call entries within a stack. 'cond' is the
	 * "end of stackframe" condition, that the 'stack++'
	 * iteration will eventually trigger.
	 */
#define HANDLE_STACK(cond) \
	do while (cond) { \
		unsigned long addr = *stack++; \
		if (kernel_text_address(addr)) { \
			/* \
			 * If the address is either in the text segment of the \
			 * kernel, or in the region which contains vmalloc'ed \
			 * memory, it *may* be the address of a calling \
			 * routine; if so, print it so that someone tracing \
			 * down the cause of the crash will be able to figure \
			 * out the call path that was taken. \
			 */ \
			printk_address(addr); \
		} \
	} while (0)
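	/*
	 * Rough illustration, not from the original source: for the IRQ-stack
	 * case below, HANDLE_STACK(stack < irqstack_end) expands to
	 * approximately
	 *
	 *	do while (stack < irqstack_end) {
	 *		unsigned long addr = *stack++;
	 *		if (kernel_text_address(addr))
	 *			printk_address(addr);
	 *	} while (0)
	 *
	 * i.e. every word on the stack that looks like a kernel text address
	 * is printed as a possible return address.
	 */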
	/*
	 * Print function call entries in all stacks, starting at the
	 * current stack address. If the stacks consist of nested
	 * exceptions
	 */
	for ( ; ; ) {
		const char *id;
		unsigned long *estack_end;
		estack_end = in_exception_stack(cpu, (unsigned long)stack,
						&used, &id);

		if (estack_end) {
			printk(" <%s>", id);
			HANDLE_STACK (stack < estack_end);
			printk(" <EOE>");
			/*
			 * We link to the next stack via the
			 * second-to-last pointer (index -2 to end) in the
			 * exception stack:
			 */
			stack = (unsigned long *) estack_end[-2];
			continue;
		}
		if (irqstack_end) {
			unsigned long *irqstack;
			irqstack = irqstack_end -
				(IRQSTACKSIZE - 64) / sizeof(*irqstack);

			if (stack >= irqstack && stack < irqstack_end) {
				printk(" <IRQ>");
				HANDLE_STACK (stack < irqstack_end);
				/*
				 * We link to the next stack (which would be
				 * the process stack normally) the last
				 * pointer (index -1 to end) in the IRQ stack:
				 */
				stack = (unsigned long *) (irqstack_end[-1]);
				irqstack_end = NULL;
				printk(" <EOI>");
				continue;
			}
		}
		break;
	}

	/*
	 * This prints the process stack:
	 */
	HANDLE_STACK (((long) stack & (THREAD_SIZE-1)) != 0);
#undef HANDLE_STACK

	printk("\n");
}
static void _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long * rsp)
{
	unsigned long *stack;
	int i;
	const int cpu = safe_smp_processor_id();
	unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
	unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);

	// debugging aid: "show_stack(NULL, NULL);" prints the
	// back trace for this cpu.

	if (rsp == NULL) {
		if (tsk)
			rsp = (unsigned long *)tsk->thread.rsp;
		else
			rsp = (unsigned long *)&rsp;
	}

	stack = rsp;
	for(i=0; i < kstack_depth_to_print; i++) {
		if (stack >= irqstack && stack <= irqstack_end) {
			if (stack == irqstack_end) {
				stack = (unsigned long *) (irqstack_end[-1]);
				printk(" <EOI> ");
			}
		} else {
			if (((long) stack & (THREAD_SIZE-1)) == 0)
				break;
		}
		if (i && ((i % 4) == 0))
			printk("\n");
		printk(" %016lx", *stack++);
		touch_nmi_watchdog();
	}
	show_trace(tsk, regs, rsp);
}
void show_stack(struct task_struct *tsk, unsigned long * rsp)
{
	_show_stack(tsk, NULL, rsp);
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	unsigned long dummy;
	show_trace(NULL, NULL, &dummy);
}

EXPORT_SYMBOL(dump_stack);
void show_registers(struct pt_regs *regs)
{
	int i;
	int in_kernel = !user_mode(regs);
	unsigned long rsp;
	const int cpu = safe_smp_processor_id();
	struct task_struct *cur = cpu_pda(cpu)->pcurrent;

	rsp = regs->rsp;

	printk("CPU %d ", cpu);
	__show_regs(regs);
	printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
		cur->comm, cur->pid, task_thread_info(cur), cur);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (in_kernel) {

		printk("Stack: ");
		_show_stack(NULL, regs, (unsigned long*)rsp);

		printk("\nCode: ");
		if (regs->rip < PAGE_OFFSET)
			goto bad;

		for (i=0; i<20; i++) {
			unsigned char c;
			if (__get_user(c, &((unsigned char*)regs->rip)[i])) {
bad:
				printk(" Bad RIP value.");
				break;
			}
			printk("%02x ", c);
		}
	}
	printk("\n");
}
void handle_BUG(struct pt_regs *regs)
{
	struct bug_frame f;
	long len;
	const char *prefix = "";

	if (user_mode(regs))
		return;
	if (__copy_from_user(&f, (const void __user *) regs->rip,
			     sizeof(struct bug_frame)))
		return;
	if (f.filename >= 0 ||
	    f.ud2[0] != 0x0f || f.ud2[1] != 0x0b)
		return;
	len = __strnlen_user((char *)(long)f.filename, PATH_MAX) - 1;
	if (len < 0 || len >= PATH_MAX)
		f.filename = (int)(long)"unmapped filename";
	else if (len > 50) {
		f.filename += len - 50;
		prefix = "...";
	}
	printk("----------- [cut here ] --------- [please bite here ] ---------\n");
	printk(KERN_ALERT "Kernel BUG at %s%.50s:%d\n", prefix, (char *)(long)f.filename, f.line);
}

#ifdef CONFIG_BUG
void out_of_line_bug(void)
{
	BUG();
}
EXPORT_SYMBOL(out_of_line_bug);
#endif
static DEFINE_SPINLOCK(die_lock);
static int die_owner = -1;
static unsigned int die_nest_count;

unsigned __kprobes long oops_begin(void)
{
	int cpu = safe_smp_processor_id();
	unsigned long flags;

	/* racy, but better than risking deadlock. */
	local_irq_save(flags);
	if (!spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}

void __kprobes oops_end(unsigned long flags)
{
	die_owner = -1;
	bust_spinlocks(0);
	die_nest_count--;
	if (die_nest_count)
		/* We still own the lock */
		local_irq_restore(flags);
	else
		/* Nest count reaches zero, release the lock. */
		spin_unlock_irqrestore(&die_lock, flags);
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
}
void __kprobes __die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;
	printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff,++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
	show_registers(regs);
	/* Executive summary in case the oops scrolled away */
	printk(KERN_ALERT "RIP ");
	printk_address(regs->rip);
	printk(" RSP <%016lx>\n", regs->rsp);
	if (kexec_should_crash(current))
		crash_kexec(regs);
}
void die(const char * str, struct pt_regs * regs, long err)
{
	unsigned long flags = oops_begin();

	handle_BUG(regs);
	__die(str, regs, err);
	oops_end(flags);
	do_exit(SIGSEGV);
}
void __kprobes die_nmi(char *str, struct pt_regs *regs)
{
	unsigned long flags = oops_begin();

	/*
	 * We are in trouble anyway, lets at least try
	 * to get a message out.
	 */
	printk(str, safe_smp_processor_id());
	show_registers(regs);
	if (kexec_should_crash(current))
		crash_kexec(regs);
	if (panic_on_timeout || panic_on_oops)
		panic("nmi watchdog");
	printk("console shuts up ...\n");
	oops_end(flags);
	nmi_exit();
	local_irq_enable();
	do_exit(SIGSEGV);
}
static void __kprobes do_trap(int trapnr, int signr, char *str,
			      struct pt_regs * regs, long error_code,
			      siginfo_t *info)
{
	struct task_struct *tsk = current;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

	if (user_mode(regs)) {
		if (exception_trace && unhandled_signal(tsk, signr))
			printk(KERN_INFO
			       "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
			       tsk->comm, tsk->pid, str,
			       regs->rip, regs->rsp, error_code);

		if (info)
			force_sig_info(signr, info, tsk);
		else
			force_sig(signr, tsk);
		return;
	}

	/* kernel trap */
	{
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->rip);
		if (fixup)
			regs->rip = fixup->fixup;
		else
			die(str, regs, error_code);
		return;
	}
}
#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
							== NOTIFY_STOP) \
		return; \
	conditional_sti(regs); \
	do_trap(trapnr, signr, str, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void __user *)siaddr; \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
							== NOTIFY_STOP) \
		return; \
	conditional_sti(regs); \
	do_trap(trapnr, signr, str, regs, error_code, &info); \
}
DO_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->rip)
DO_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->rip)
DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR(18, SIGSEGV, "reserved", reserved)
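/*
 * Illustration, not from the original source: DO_ERROR(10, SIGSEGV,
 * "invalid TSS", invalid_TSS) above expands to roughly
 *
 *	asmlinkage void do_invalid_TSS(struct pt_regs * regs, long error_code)
 *	{
 *		if (notify_die(DIE_TRAP, "invalid TSS", regs, error_code,
 *						10, SIGSEGV) == NOTIFY_STOP)
 *			return;
 *		conditional_sti(regs);
 *		do_trap(10, SIGSEGV, "invalid TSS", regs, error_code, NULL);
 *	}
 *
 * giving one handler per trap vector without repeating the boilerplate.
 */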
/* Runs on IST stack */
asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
			12, SIGBUS) == NOTIFY_STOP)
		return;
	preempt_conditional_sti(regs);
	do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}
asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

	/* Return not checked because double fault cannot be ignored */
	notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 8;

	/* This is always a kernel trap and never fixable (and thus must
	   never return). */
	for (;;)
		die(str, regs, error_code);
}
asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
						long error_code)
{
	struct task_struct *tsk = current;

	conditional_sti(regs);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;

	if (user_mode(regs)) {
		if (exception_trace && unhandled_signal(tsk, SIGSEGV))
			printk(KERN_INFO
		       "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
			       tsk->comm, tsk->pid,
			       regs->rip, regs->rsp, error_code);

		force_sig(SIGSEGV, tsk);
		return;
	}

	/* kernel gp */
	{
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->rip);
		if (fixup) {
			regs->rip = fixup->fixup;
			return;
		}
		if (notify_die(DIE_GPF, "general protection fault", regs,
					error_code, 13, SIGSEGV) == NOTIFY_STOP)
			return;
		die("general protection fault", regs, error_code);
	}
}
static __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
	printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
	printk("You probably have a hardware problem with your RAM chips\n");

	/* Clear and disable the memory parity error line. */
	reason = (reason & 0xf) | 4;
	outb(reason, 0x61);
}

static __kprobes void
io_check_error(unsigned char reason, struct pt_regs * regs)
{
	printk("NMI: IOCK error (debug interrupt?)\n");
	show_registers(regs);

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & 0xf) | 8;
	outb(reason, 0x61);
	mdelay(2000);
	reason &= ~8;
	outb(reason, 0x61);
}

static __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
	printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
	printk("Dazed and confused, but trying to continue\n");
	printk("Do you have a strange power saving mode enabled?\n");
}
/* Runs on IST stack. This code must keep interrupts off all the time.
   Nested NMIs are prevented by the CPU. */
asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int cpu;

	cpu = smp_processor_id();

	/* Only the BSP gets external NMIs from the system. */
	if (!cpu)
		reason = get_nmi_reason();

	if (!(reason & 0xc0)) {
		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
								== NOTIFY_STOP)
			return;
#ifdef CONFIG_X86_LOCAL_APIC
		/*
		 * Ok, so this is none of the documented NMI sources,
		 * so it must be the NMI watchdog.
		 */
		if (nmi_watchdog > 0) {
			nmi_watchdog_tick(regs,reason);
			return;
		}
#endif
		unknown_nmi_error(reason, regs);
		return;
	}
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
		return;

	/* AK: following checks seem to be broken on modern chipsets. FIXME */

	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
		io_check_error(reason, regs);
}
/* runs on IST stack. */
asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
{
	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
		return;
	}
	preempt_conditional_sti(regs);
	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}
/* Help handler running on IST stack to switch back to user stack
   for scheduling or signal handling. The actual stack switch is done in
   entry.S */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = eregs;
	/* Did already sync */
	if (eregs == (struct pt_regs *)eregs->rsp)
		;
	/* Exception from user space */
	else if (user_mode(eregs))
		regs = task_pt_regs(current);
	/* Exception from kernel and interrupts are enabled. Move to
	   kernel process stack. */
	else if (eregs->eflags & X86_EFLAGS_IF)
		regs = (struct pt_regs *)(eregs->rsp -= sizeof(struct pt_regs));
	if (eregs != regs)
		*regs = *eregs;
	return regs;
}
/* runs on IST stack. */
asmlinkage void __kprobes do_debug(struct pt_regs * regs,
				   unsigned long error_code)
{
	unsigned long condition;
	struct task_struct *tsk = current;
	siginfo_t info;

	get_debugreg(condition, 6);

	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
						SIGTRAP) == NOTIFY_STOP)
		return;

	preempt_conditional_sti(regs);

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg7) {
			goto clear_dr7;
		}
	}

	tsk->thread.debugreg6 = condition;

	/* Mask out spurious TF errors due to lazy TF clearing */
	if (condition & DR_STEP) {
		/*
		 * The TF error should be masked out only if the current
		 * process is not traced and if the TRAP flag has been set
		 * previously by a tracing process (condition detected by
		 * the PT_DTRACE flag); remember that the i386 TRAP flag
		 * can be modified by the process itself in user mode,
		 * allowing programs to debug themselves without the ptrace()
		 * interface.
		 */
		if (!user_mode(regs))
			goto clear_TF_reenable;
		/*
		 * Was the TF flag set by a debugger? If so, clear it now,
		 * so that register information is correct.
		 */
		if (tsk->ptrace & PT_DTRACE) {
			regs->eflags &= ~TF_MASK;
			tsk->ptrace &= ~PT_DTRACE;
		}
	}

	/* Ok, finally something we can handle */
	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;
	info.si_addr = user_mode(regs) ? (void __user *)regs->rip : NULL;
	force_sig_info(SIGTRAP, &info, tsk);

clear_dr7:
	set_debugreg(0UL, 7);
	preempt_conditional_cli(regs);
	return;

clear_TF_reenable:
	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
	regs->eflags &= ~TF_MASK;
	preempt_conditional_cli(regs);
}
static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
{
	const struct exception_table_entry *fixup;
	fixup = search_exception_tables(regs->rip);
	if (fixup) {
		regs->rip = fixup->fixup;
		return 1;
	}
	notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
	/* Illegal floating point operation in the kernel */
	current->thread.trap_no = trapnr;
	die(str, regs, 0);
	return 0;
}
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
asmlinkage void do_coprocessor_error(struct pt_regs *regs)
{
	void __user *rip = (void __user *)(regs->rip);
	struct task_struct * task;
	siginfo_t info;
	unsigned short cwd, swd;

	conditional_sti(regs);
	if (!user_mode(regs) &&
	    kernel_math_error(regs, "kernel x87 math error", 16))
		return;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 16;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = rip;
	/*
	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
	 * status.  0x3f is the exception bits in these regs, 0x200 is the
	 * C1 reg you need in case of a stack fault, 0x040 is the stack
	 * fault bit.  We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception
	 */
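	/*
	 * Worked example, not from the original source: with the default
	 * control word cwd = 0x037f all exceptions are masked, so
	 * (swd & ~cwd & 0x3f) == 0 and we fall through to the default case.
	 * If the user unmasks divide-by-zero (cwd = 0x037b) and swd reports
	 * 0x0004, then swd & ~cwd & 0x3f == 0x004 and si_code becomes
	 * FPE_FLTDIV.
	 */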
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);
	switch (swd & ~cwd & 0x3f) {
		case 0x000:
		default:
			break;
		case 0x001: /* Invalid Op */
			/*
			 * swd & 0x240 == 0x040: Stack Underflow
			 * swd & 0x240 == 0x240: Stack Overflow
			 * User must clear the SF bit (0x40) if set
			 */
			info.si_code = FPE_FLTINV;
			break;
		case 0x002: /* Denormalize */
		case 0x010: /* Underflow */
			info.si_code = FPE_FLTUND;
			break;
		case 0x004: /* Zero Divide */
			info.si_code = FPE_FLTDIV;
			break;
		case 0x008: /* Overflow */
			info.si_code = FPE_FLTOVF;
			break;
		case 0x020: /* Precision */
			info.si_code = FPE_FLTRES;
			break;
	}
	force_sig_info(SIGFPE, &info, task);
}
asmlinkage void bad_intr(void)
{
	printk("bad interrupt");
}
asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
{
	void __user *rip = (void __user *)(regs->rip);
	struct task_struct * task;
	siginfo_t info;
	unsigned short mxcsr;

	conditional_sti(regs);
	if (!user_mode(regs) &&
			kernel_math_error(regs, "kernel simd math error", 19))
		return;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 19;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = rip;
	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register.  Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
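	/*
	 * Worked example, not from the original source: MXCSR resets to
	 * 0x1f80, i.e. all exceptions masked, so
	 * ~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f) is 0 and we take the
	 * default case.  If the divide-by-zero mask bit (0x0200) is cleared
	 * and the ZE status bit (0x0004) is set, the expression yields 0x004
	 * and si_code becomes FPE_FLTDIV.
	 */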
	mxcsr = get_fpu_mxcsr(task);
	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
		case 0x000:
		default:
			break;
		case 0x001: /* Invalid Op */
			info.si_code = FPE_FLTINV;
			break;
		case 0x002: /* Denormalize */
		case 0x010: /* Underflow */
			info.si_code = FPE_FLTUND;
			break;
		case 0x004: /* Zero Divide */
			info.si_code = FPE_FLTDIV;
			break;
		case 0x008: /* Overflow */
			info.si_code = FPE_FLTOVF;
			break;
		case 0x020: /* Precision */
			info.si_code = FPE_FLTRES;
			break;
	}
	force_sig_info(SIGFPE, &info, task);
}
asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs)
{
}

asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
{
}
/*
 *  'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 */
asmlinkage void math_state_restore(void)
{
	struct task_struct *me = current;
	clts();			/* Allow maths ops (or we recurse) */

	if (!used_math())
		init_fpu(me);
	restore_fpu_checking(&me->thread.i387.fxsave);
	task_thread_info(me)->status |= TS_USEDFPU;
}
void __init trap_init(void)
{
	set_intr_gate(0,&divide_error);
	set_intr_gate_ist(1,&debug,DEBUG_STACK);
	set_intr_gate_ist(2,&nmi,NMI_STACK);
	set_system_gate_ist(3,&int3,DEBUG_STACK); /* int3 can be called from all */
	set_system_gate(4,&overflow);	/* int4 can be called from all */
	set_intr_gate(5,&bounds);
	set_intr_gate(6,&invalid_op);
	set_intr_gate(7,&device_not_available);
	set_intr_gate_ist(8,&double_fault, DOUBLEFAULT_STACK);
	set_intr_gate(9,&coprocessor_segment_overrun);
	set_intr_gate(10,&invalid_TSS);
	set_intr_gate(11,&segment_not_present);
	set_intr_gate_ist(12,&stack_segment,STACKFAULT_STACK);
	set_intr_gate(13,&general_protection);
	set_intr_gate(14,&page_fault);
	set_intr_gate(15,&spurious_interrupt_bug);
	set_intr_gate(16,&coprocessor_error);
	set_intr_gate(17,&alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(18,&machine_check, MCE_STACK);
#endif
	set_intr_gate(19,&simd_coprocessor_error);

#ifdef CONFIG_IA32_EMULATION
	set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif

	/*
	 * Should be a barrier for any external CPU state.
	 */
	cpu_init();
}
/* Actual parsing is done early in setup.c. */
static int __init oops_dummy(char *s)
{
	panic_on_oops = 1;
	return 1;
}
__setup("oops=", oops_dummy);

static int __init kstack_setup(char *s)
{
	kstack_depth_to_print = simple_strtoul(s,NULL,0);
	return 1;
}
__setup("kstack=", kstack_setup);

static int __init call_trace_setup(char *s)
{
	if (strcmp(s, "old") == 0)
		call_trace = -1;
	else if (strcmp(s, "both") == 0)
		call_trace = 0;
	else if (strcmp(s, "newfallback") == 0)
		call_trace = 1;
	else if (strcmp(s, "new") == 0)
		call_trace = 2;
	return 1;
}
__setup("call_trace=", call_trace_setup);
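/*
 * Usage sketch, not from the original source: the __setup handlers above
 * consume kernel command line options such as
 *
 *	kstack=48 call_trace=both oops=panic
 *
 * which deepen the raw stack dump to 48 words, select both the DWARF2 and
 * the inexact backtrace, and (together with the early parsing in setup.c)
 * make an oops panic the machine.
 */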