/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/unwind.h>
#include <linux/uaccess.h>
#include <linux/nmi.h>
#include <linux/bug.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/unwind.h>
#include <asm/arch_hooks.h>
#include <linux/kdebug.h>
#include <asm/stacktrace.h>

#include <linux/module.h>

#include "mach_traps.h"
int panic_on_unrecovered_nmi;

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

asmlinkage int system_call(void);
/* Do we ignore FPU interrupts ? */
char ignore_fpu_irq = 0;
/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.. We have a special link segment
 * for this.
 */
gate_desc idt_table[256]
        __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);
int kstack_depth_to_print = 24;
static unsigned int code_bytes = 64;
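
/*
 * A candidate stack address is valid only if it lies within the given
 * thread's stack area: above the thread_info header at the bottom and
 * with at least 'size' bytes left before the top of the stack.
 */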
static inline int valid_stack_ptr(struct thread_info *tinfo,
                                  void *p, unsigned size)
{
        return  p > (void *)tinfo &&
                p <= (void *)tinfo + THREAD_SIZE - size;
}
/* The form of the top of the frame on the stack */
struct stack_frame {
        struct stack_frame *next_frame;
        unsigned long return_address;
};
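
/*
 * Walk one thread stack.  With CONFIG_FRAME_POINTER we follow the saved
 * %ebp chain, so only real return addresses are reported; the fallback
 * scans every word on the stack and reports anything that points into
 * kernel text, which can include stale data.
 */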
static inline unsigned long print_context_stack(struct thread_info *tinfo,
                                unsigned long *stack, unsigned long bp,
                                const struct stacktrace_ops *ops, void *data)
{
#ifdef CONFIG_FRAME_POINTER
        struct stack_frame *frame = (struct stack_frame *)bp;
        while (valid_stack_ptr(tinfo, frame, sizeof(*frame))) {
                struct stack_frame *next;
                unsigned long addr;

                addr = frame->return_address;
                ops->address(data, addr);
                /*
                 * break out of recursive entries (such as
                 * end_of_stack_stop_unwind_function). Also,
                 * we can never allow a frame pointer to
                 * move downwards!
                 */
                next = frame->next_frame;
                if (next <= frame)
                        break;
                frame = next;
        }
#else
        while (valid_stack_ptr(tinfo, stack, sizeof(*stack))) {
                unsigned long addr;

                addr = *stack++;
                if (__kernel_text_address(addr))
                        ops->address(data, addr);
        }
#endif
        return bp;
}
152 #define MSG(msg) ops->warning(data, msg)
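
/*
 * Generic stack walker: start from the given stack pointer (or the
 * task's saved one), walk up through nested IRQ stacks by following
 * thread_info->previous_esp, and hand each address to the given
 * stacktrace_ops callbacks.
 */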
void dump_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack,
                const struct stacktrace_ops *ops, void *data)
{
        unsigned long bp = 0;

        if (!task)
                task = current;

        if (!stack) {
                unsigned long dummy;
                stack = &dummy;
                if (task != current)
                        stack = (unsigned long *)task->thread.sp;
        }

#ifdef CONFIG_FRAME_POINTER
        if (!bp) {
                if (task == current) {
                        /* Grab bp right from our regs */
                        asm ("movl %%ebp, %0" : "=r" (bp) : );
                } else {
                        /* bp is the last reg pushed by switch_to */
                        bp = *(unsigned long *) task->thread.sp;
                }
        }
#endif

        while (1) {
                struct thread_info *context;
                context = (struct thread_info *)
                        ((unsigned long)stack & (~(THREAD_SIZE - 1)));
                bp = print_context_stack(context, stack, bp, ops, data);
                /* Should be after the line below, but somewhere
                   in early boot context comes out corrupted and we
                   can't reference it -AK */
                if (ops->stack(data, "IRQ") < 0)
                        break;
                stack = (unsigned long*)context->previous_esp;
                if (!stack)
                        break;
                touch_nmi_watchdog();
        }
}
EXPORT_SYMBOL(dump_trace);
static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
        printk("%s", (char *)data);
        print_symbol(msg, symbol);
        printk("\n");
}

static void print_trace_warning(void *data, char *msg)
{
        printk("%s%s\n", (char *)data, msg);
}

static int print_trace_stack(void *data, char *name)
{
        return 0;
}

/*
 * Print one address/symbol entry per line.
 */
static void print_trace_address(void *data, unsigned long addr)
{
        printk("%s [<%08lx>] ", (char *)data, addr);
        print_symbol("%s\n", addr);
        touch_nmi_watchdog();
}

static const struct stacktrace_ops print_trace_ops = {
        .warning = print_trace_warning,
        .warning_symbol = print_trace_warning_symbol,
        .stack = print_trace_stack,
        .address = print_trace_address,
};
static void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
                   unsigned long *stack, char *log_lvl)
{
        dump_trace(task, regs, stack, &print_trace_ops, log_lvl);
        printk("%s =======================\n", log_lvl);
}
void show_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack)
{
        show_trace_log_lvl(task, regs, stack, "");
}
static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
                               unsigned long *sp, char *log_lvl)
{
        unsigned long *stack;
        int i;

        if (sp == NULL) {
                if (task)
                        sp = (unsigned long*)task->thread.sp;
                else
                        sp = (unsigned long *)&sp;
        }

        stack = sp;
        for(i = 0; i < kstack_depth_to_print; i++) {
                if (kstack_end(stack))
                        break;
                if (i && ((i % 8) == 0))
                        printk("\n%s       ", log_lvl);
                printk("%08lx ", *stack++);
        }
        printk("\n%sCall Trace:\n", log_lvl);
        show_trace_log_lvl(task, regs, sp, log_lvl);
}
void show_stack(struct task_struct *task, unsigned long *sp)
{
        show_stack_log_lvl(task, NULL, sp, "");
}
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        unsigned long stack;

        printk("Pid: %d, comm: %.20s %s %s %.*s\n",
                current->pid, current->comm, print_tainted(),
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);
        show_trace(current, NULL, &stack);
}

EXPORT_SYMBOL(dump_stack);
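
/*
 * Oops-style register dump: the registers themselves, then, for
 * kernel-mode faults, the stack contents and a hex dump of the code
 * around the faulting instruction with the byte at EIP shown as <xx>.
 */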
void show_registers(struct pt_regs *regs)
{
        int i;

        print_modules();
        __show_registers(regs, 0);
        printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
                TASK_COMM_LEN, current->comm, task_pid_nr(current),
                current_thread_info(), current, task_thread_info(current));
        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         */
        if (!user_mode_vm(regs)) {
                u8 *ip;
                unsigned int code_prologue = code_bytes * 43 / 64;
                unsigned int code_len = code_bytes;
                unsigned char c;

                printk("\n" KERN_EMERG "Stack: ");
                show_stack_log_lvl(NULL, regs, &regs->sp, KERN_EMERG);

                printk(KERN_EMERG "Code: ");

                ip = (u8 *)regs->ip - code_prologue;
                if (ip < (u8 *)PAGE_OFFSET ||
                        probe_kernel_address(ip, c)) {
                        /* try starting at EIP */
                        ip = (u8 *)regs->ip;
                        code_len = code_len - code_prologue + 1;
                }
                for (i = 0; i < code_len; i++, ip++) {
                        if (ip < (u8 *)PAGE_OFFSET ||
                                probe_kernel_address(ip, c)) {
                                printk(" Bad EIP value.");
                                break;
                        }
                        if (ip == (u8 *)regs->ip)
                                printk("<%02x> ", c);
                        else
                                printk("%02x ", c);
                }
        }
        printk("\n");
}
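
/*
 * BUG() on i386 compiles to the two-byte 'ud2' opcode (0x0f 0x0b),
 * which reads back as 0x0b0f through a little-endian unsigned short;
 * report_bug() uses this check before decoding the trap as a BUG.
 */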
int is_valid_bugaddr(unsigned long ip)
{
        unsigned short ud2;

        if (ip < PAGE_OFFSET)
                return 0;
        if (probe_kernel_address((unsigned short *)ip, ud2))
                return 0;

        return ud2 == 0x0b0f;
}
/*
 * This is gone through when something in the kernel has done something bad and
 * is about to be terminated.
 */
void die(const char * str, struct pt_regs * regs, long err)
{
        static struct {
                raw_spinlock_t lock;
                u32 lock_owner;
                int lock_owner_depth;
        } die = {
                .lock =                 __RAW_SPIN_LOCK_UNLOCKED,
                .lock_owner =           -1,
                .lock_owner_depth =     0
        };
        static int die_counter;
        unsigned long flags;

        oops_enter();

        if (die.lock_owner != raw_smp_processor_id()) {
                console_verbose();
                raw_local_irq_save(flags);
                __raw_spin_lock(&die.lock);
                die.lock_owner = smp_processor_id();
                die.lock_owner_depth = 0;
                bust_spinlocks(1);
        } else
                raw_local_irq_save(flags);

        if (++die.lock_owner_depth < 3) {
                unsigned long sp;
                unsigned short ss;

                report_bug(regs->ip, regs);

                printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff,
                       ++die_counter);
#ifdef CONFIG_PREEMPT
                printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
                printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
                printk("DEBUG_PAGEALLOC");
#endif
                printk("\n");

                if (notify_die(DIE_OOPS, str, regs, err,
                                current->thread.trap_no, SIGSEGV) !=
                                                        NOTIFY_STOP) {
                        show_registers(regs);
                        /* Executive summary in case the oops scrolled away */
                        sp = (unsigned long) (&regs->sp);
                        savesegment(ss, ss);
                        if (user_mode(regs)) {
                                sp = regs->sp;
                                ss = regs->ss & 0xffff;
                        }
                        printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
                        print_symbol("%s", regs->ip);
                        printk(" SS:ESP %04x:%08lx\n", ss, sp);
                } else
                        regs = NULL;
        } else
                printk(KERN_EMERG "Recursive die() failure, output suppressed\n");

        bust_spinlocks(0);
        die.lock_owner = -1;
        add_taint(TAINT_DIE);
        __raw_spin_unlock(&die.lock);
        raw_local_irq_restore(flags);

        if (!regs)
                return;

        if (kexec_should_crash(current))
                crash_kexec(regs);

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops)
                panic("Fatal exception");

        oops_exit();
        do_exit(SIGSEGV);
}
static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
        if (!user_mode_vm(regs))
                die(str, regs, err);
}
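
/*
 * Common body of most trap handlers: hand the trap to the vm86
 * emulation, to an exception fixup, or to the current task as a
 * signal, depending on where the fault happened.
 */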
static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
                              struct pt_regs * regs, long error_code,
                              siginfo_t *info)
{
        struct task_struct *tsk = current;

        if (regs->flags & VM_MASK) {
                if (vm86)
                        goto vm86_trap;
                goto trap_signal;
        }

        if (!user_mode(regs))
                goto kernel_trap;

        trap_signal: {
                /*
                 * We want error_code and trap_no set for userspace faults and
                 * kernelspace faults which result in die(), but not
                 * kernelspace faults which are fixed up. die() gives the
                 * process no chance to handle the signal and notice the
                 * kernel fault information, so that won't result in polluting
                 * the information about previously queued, but not yet
                 * delivered, faults. See also do_general_protection below.
                 */
                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = trapnr;
                if (info)
                        force_sig_info(signr, info, tsk);
                else
                        force_sig(signr, tsk);
                return;
        }

        kernel_trap: {
                if (!fixup_exception(regs)) {
                        tsk->thread.error_code = error_code;
                        tsk->thread.trap_no = trapnr;
                        die(str, regs, error_code);
                }
                return;
        }

        vm86_trap: {
                int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs,
                                                error_code, trapnr);
                if (ret)
                        goto trap_signal;
                return;
        }
}
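
/*
 * The DO_ERROR* macros stamp out the actual trap handlers.  The _INFO
 * variants also fill in a siginfo_t for the signal; the VM86 variants
 * give vm86 mode the first chance to handle the trap.
 */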
#define DO_ERROR(trapnr, signr, str, name) \
void do_##name(struct pt_regs * regs, long error_code) \
{ \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr, irq) \
void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        if (irq) \
                local_irq_enable(); \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
}

#define DO_VM86_ERROR(trapnr, signr, str, name) \
void do_##name(struct pt_regs * regs, long error_code) \
{ \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
}

#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        trace_hardirqs_fixup(); \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
}
DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->ip)
#ifndef CONFIG_KPROBES
DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
#endif
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip, 0)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0, 0)
DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0, 1)
void __kprobes do_general_protection(struct pt_regs * regs,
                                              long error_code)
{
        int cpu = get_cpu();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);
        struct thread_struct *thread = &current->thread;

        /*
         * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
         * invalid offset set (the LAZY one) and the faulting thread has
         * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS
         * and we set the offset field correctly. Then we let the CPU
         * restart the faulting instruction.
         */
        if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
            thread->io_bitmap_ptr) {
                memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
                       thread->io_bitmap_max);
                /*
                 * If the previously set map was extending to higher ports
                 * than the current one, pad extra space with 0xff (no access).
                 */
                if (thread->io_bitmap_max < tss->io_bitmap_max)
                        memset((char *) tss->io_bitmap +
                                thread->io_bitmap_max, 0xff,
                                tss->io_bitmap_max - thread->io_bitmap_max);
                tss->io_bitmap_max = thread->io_bitmap_max;
                tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
                tss->io_bitmap_owner = thread;
                put_cpu();
                return;
        }
        put_cpu();

        if (regs->flags & VM_MASK)
                goto gp_in_vm86;

        if (!user_mode(regs))
                goto gp_in_kernel;

        current->thread.error_code = error_code;
        current->thread.trap_no = 13;
        if (show_unhandled_signals && unhandled_signal(current, SIGSEGV) &&
            printk_ratelimit())
                printk(KERN_INFO
                    "%s[%d] general protection ip:%lx sp:%lx error:%lx\n",
                    current->comm, task_pid_nr(current),
                    regs->ip, regs->sp, error_code);

        force_sig(SIGSEGV, current);
        return;

gp_in_vm86:
        local_irq_enable();
        handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
        return;

gp_in_kernel:
        if (!fixup_exception(regs)) {
                current->thread.error_code = error_code;
                current->thread.trap_no = 13;
                if (notify_die(DIE_GPF, "general protection fault", regs,
                                error_code, 13, SIGSEGV) == NOTIFY_STOP)
                        return;
                die("general protection fault", regs, error_code);
        }
}
static __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
        printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on "
                "CPU %d.\n", reason, smp_processor_id());
        printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");

#if defined(CONFIG_EDAC)
        if (edac_handler_set()) {
                edac_atomic_assert_error();
                return;
        }
#endif

        if (panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        printk(KERN_EMERG "Dazed and confused, but trying to continue\n");

        /* Clear and disable the memory parity error line. */
        clear_mem_error(reason);
}
static __kprobes void
io_check_error(unsigned char reason, struct pt_regs * regs)
{
        unsigned long i;

        printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
        show_registers(regs);

        /* Re-enable the IOCK line, wait for a few seconds */
        reason = (reason & 0xf) | 8;
        outb(reason, 0x61);
        i = 2000;
        while (--i) udelay(1000);
        reason &= ~8;
        outb(reason, 0x61);
}
static __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
#ifdef CONFIG_MCA
        /* Might actually be able to figure out what the guilty party
         * is. */
        if (MCA_bus) {
                mca_handle_nmi();
                return;
        }
#endif
        printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on "
                "CPU %d.\n", reason, smp_processor_id());
        printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
        if (panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
}
static DEFINE_SPINLOCK(nmi_print_lock);

void __kprobes die_nmi(struct pt_regs *regs, const char *msg)
{
        if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) ==
            NOTIFY_STOP)
                return;

        spin_lock(&nmi_print_lock);
        /*
         * We are in trouble anyway, let's at least try
         * to get a message out.
         */
        bust_spinlocks(1);
        printk(KERN_EMERG "%s", msg);
        printk(" on CPU%d, ip %08lx, registers:\n",
                smp_processor_id(), regs->ip);
        show_registers(regs);
        console_silent();
        spin_unlock(&nmi_print_lock);
        bust_spinlocks(0);

        /* If we are in kernel we are probably nested up pretty bad
         * and might as well get out now while we still can.
         */
        if (!user_mode_vm(regs)) {
                current->thread.trap_no = 2;
                crash_kexec(regs);
        }

        do_exit(SIGSEGV);
}
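
/*
 * NMI reason bits, as read from port 0x61: 0x80 signals a memory
 * parity/ECC error, 0x40 an I/O check error.  NMIs with neither bit
 * set are IPIs, watchdog ticks, or unknown.
 */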
static __kprobes void default_do_nmi(struct pt_regs * regs)
{
        unsigned char reason = 0;

        /* Only the BSP gets external NMIs from the system. */
        if (!smp_processor_id())
                reason = get_nmi_reason();

        if (!(reason & 0xc0)) {
                if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
                                                        == NOTIFY_STOP)
                        return;
#ifdef CONFIG_X86_LOCAL_APIC
                /*
                 * Ok, so this is none of the documented NMI sources,
                 * so it must be the NMI watchdog.
                 */
                if (nmi_watchdog_tick(regs, reason))
                        return;
                if (!do_nmi_callback(regs, smp_processor_id()))
#endif
                        unknown_nmi_error(reason, regs);

                return;
        }
        if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
                return;
        if (reason & 0x80)
                mem_parity_error(reason, regs);
        if (reason & 0x40)
                io_check_error(reason, regs);
        /*
         * Reassert NMI in case it became active meanwhile
         * as it's edge-triggered.
         */
        reassert_nmi();
}
static int ignore_nmis;

__kprobes void do_nmi(struct pt_regs * regs, long error_code)
{
        int cpu;

        nmi_enter();

        cpu = smp_processor_id();

        ++nmi_count(cpu);

        if (!ignore_nmis)
                default_do_nmi(regs);

        nmi_exit();
}

void stop_nmi(void)
{
        acpi_nmi_disable();
        ignore_nmis++;
}

void restart_nmi(void)
{
        ignore_nmis--;
        acpi_nmi_enable();
}
#ifdef CONFIG_KPROBES
void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
        trace_hardirqs_fixup();

        if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
                        == NOTIFY_STOP)
                return;
        /*
         * This is an interrupt gate, because kprobes wants interrupts
         * disabled.  Normal trap handlers don't.
         */
        restore_interrupts(regs);
        do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
}
#endif
/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 */
void __kprobes do_debug(struct pt_regs * regs, long error_code)
{
        unsigned int condition;
        struct task_struct *tsk = current;

        trace_hardirqs_fixup();

        get_debugreg(condition, 6);

        /*
         * The processor cleared BTF, so don't mark that we need it set.
         */
        clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
        tsk->thread.debugctlmsr = 0;

        if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
                                        SIGTRAP) == NOTIFY_STOP)
                return;
        /* It's safe to allow irq's after DR6 has been saved */
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_enable();

        /* Mask out spurious debug traps due to lazy DR7 setting */
        if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
                if (!tsk->thread.debugreg7)
                        goto clear_dr7;
        }

        if (regs->flags & VM_MASK)
                goto debug_vm86;

        /* Save debug status register where ptrace can see it */
        tsk->thread.debugreg6 = condition;

        /*
         * Single-stepping through TF: make sure we ignore any events in
         * kernel space (but re-enable TF when returning to user mode).
         */
        if (condition & DR_STEP) {
                /*
                 * We already checked v86 mode above, so we can
                 * check for kernel mode by just checking the CPL
                 * of CS.
                 */
                if (!user_mode(regs))
                        goto clear_TF_reenable;
        }

        /* Ok, finally something we can handle */
        send_sigtrap(tsk, regs, error_code);

        /* Disable additional traps. They'll be re-enabled when
         * the signal is delivered.
         */
clear_dr7:
        set_debugreg(0, 7);
        return;

debug_vm86:
        handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
        return;

clear_TF_reenable:
        set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
        regs->flags &= ~TF_MASK;
        return;
}
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void __user *ip)
{
        struct task_struct * task;
        siginfo_t info;
        unsigned short cwd, swd;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 16;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = ip;
        /*
         * (~cwd & swd) will mask out exceptions that are not set to unmasked
         * status.  0x3f is the exception bits in these regs, 0x200 is the
         * C1 reg you need in case of a stack fault, 0x040 is the stack
         * fault bit.  We should only be taking one exception at a time,
         * so if this combination doesn't produce any single exception,
         * then we have a bad program that isn't synchronizing its FPU usage
         * and it will suffer the consequences since we won't be able to
         * fully reproduce the context of the exception
         */
        cwd = get_fpu_cwd(task);
        swd = get_fpu_swd(task);
        switch (swd & ~cwd & 0x3f) {
                case 0x000: /* No unmasked exception */
                        return;
                default:    /* Multiple exceptions */
                        break;
                case 0x001: /* Invalid Op */
                        /*
                         * swd & 0x240 == 0x040: Stack Underflow
                         * swd & 0x240 == 0x240: Stack Overflow
                         * User must clear the SF bit (0x40) if set
                         */
                        info.si_code = FPE_FLTINV;
                        break;
                case 0x002: /* Denormalize */
                case 0x010: /* Underflow */
                        info.si_code = FPE_FLTUND;
                        break;
                case 0x004: /* Zero Divide */
                        info.si_code = FPE_FLTDIV;
                        break;
                case 0x008: /* Overflow */
                        info.si_code = FPE_FLTOVF;
                        break;
                case 0x020: /* Precision */
                        info.si_code = FPE_FLTRES;
                        break;
        }
        force_sig_info(SIGFPE, &info, task);
}
void do_coprocessor_error(struct pt_regs * regs, long error_code)
{
        ignore_fpu_irq = 1;
        math_error((void __user *)regs->ip);
}
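
/*
 * SIMD exceptions arrive via trap 19 on PIII and later.  Unlike the
 * x87 path above, masking and status live together in MXCSR, which is
 * why the handler below derives the unmasked exception from a single
 * register.
 */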
static void simd_math_error(void __user *ip)
{
        struct task_struct * task;
        siginfo_t info;
        unsigned short mxcsr;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 19;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = ip;
        /*
         * The SIMD FPU exceptions are handled a little differently, as there
         * is only a single status/control register.  Thus, to determine which
         * unmasked exception was caught we must mask the exception mask bits
         * at 0x1f80, and then use these to mask the exception bits at 0x3f.
         */
        mxcsr = get_fpu_mxcsr(task);
        switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
                case 0x000:
                default:
                        break;
                case 0x001: /* Invalid Op */
                        info.si_code = FPE_FLTINV;
                        break;
                case 0x002: /* Denormalize */
                case 0x010: /* Underflow */
                        info.si_code = FPE_FLTUND;
                        break;
                case 0x004: /* Zero Divide */
                        info.si_code = FPE_FLTDIV;
                        break;
                case 0x008: /* Overflow */
                        info.si_code = FPE_FLTOVF;
                        break;
                case 0x020: /* Precision */
                        info.si_code = FPE_FLTRES;
                        break;
        }
        force_sig_info(SIGFPE, &info, task);
}
void do_simd_coprocessor_error(struct pt_regs * regs,
                                          long error_code)
{
        if (cpu_has_xmm) {
                /* Handle SIMD FPU exceptions on PIII+ processors. */
                ignore_fpu_irq = 1;
                simd_math_error((void __user *)regs->ip);
        } else {
                /*
                 * Handle strange cache flush from user space exception
                 * in all other cases.  This is undocumented behaviour.
                 */
                if (regs->flags & VM_MASK) {
                        handle_vm86_fault((struct kernel_vm86_regs *)regs,
                                          error_code);
                        return;
                }
                current->thread.trap_no = 19;
                current->thread.error_code = error_code;
                die_if_kernel("cache flush denied", regs, error_code);
                force_sig(SIGSEGV, current);
        }
}
void do_spurious_interrupt_bug(struct pt_regs * regs,
                                          long error_code)
{
#if 0
        /* No need to warn about this any longer. */
        printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}
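
/*
 * On a 16-bit stack segment, 'iret' restores only the low word of
 * %esp.  The espfix workaround switches to a special GDT segment whose
 * base is chosen so the truncated %esp still addresses the right
 * kernel stack; this helper patches that segment's base and limit.
 */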
unsigned long patch_espfix_desc(unsigned long uesp,
                                          unsigned long kesp)
{
        struct desc_struct *gdt = __get_cpu_var(gdt_page).gdt;
        unsigned long base = (kesp - uesp) & -THREAD_SIZE;
        unsigned long new_kesp = kesp - base;
        unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
        __u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];
        /* Set up base for espfix segment */
        desc &= 0x00f0ff0000000000ULL;
        desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
                ((((__u64)base) << 32) & 0xff00000000000000ULL) |
                ((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
                (lim_pages & 0xffff);
        *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;
        return new_kesp;
}
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(void)
{
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = thread->task;

        clts();         /* Allow maths ops (or we recurse) */
        if (!tsk_used_math(tsk))
                init_fpu(tsk);
        restore_fpu(tsk);
        thread->status |= TS_USEDFPU;   /* So we fnsave on switch_to() */
        tsk->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);
#ifndef CONFIG_MATH_EMULATION

asmlinkage void math_emulate(long arg)
{
        printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n");
        printk(KERN_EMERG "killing %s.\n", current->comm);
        force_sig(SIGFPE, current);
        schedule();
}

#endif /* CONFIG_MATH_EMULATION */
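
/*
 * Install the exception handlers.  Gates that must run with interrupts
 * disabled (debug, NMI, page fault) are interrupt gates; int3, overflow
 * and the syscall vector are reachable from user space; the rest are
 * plain trap gates.
 */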
void __init trap_init(void)
{
        int i;

#ifdef CONFIG_EISA
        void __iomem *p = ioremap(0x0FFFD9, 4);
        if (readl(p) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) {
                EISA_bus = 1;
        }
        iounmap(p);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
        init_apic_mappings();
#endif

        set_trap_gate(0,&divide_error);
        set_intr_gate(1,&debug);
        set_intr_gate(2,&nmi);
        set_system_intr_gate(3, &int3); /* int3/4 can be called from all */
        set_system_gate(4,&overflow);
        set_trap_gate(5,&bounds);
        set_trap_gate(6,&invalid_op);
        set_trap_gate(7,&device_not_available);
        set_task_gate(8,GDT_ENTRY_DOUBLEFAULT_TSS);
        set_trap_gate(9,&coprocessor_segment_overrun);
        set_trap_gate(10,&invalid_TSS);
        set_trap_gate(11,&segment_not_present);
        set_trap_gate(12,&stack_segment);
        set_trap_gate(13,&general_protection);
        set_intr_gate(14,&page_fault);
        set_trap_gate(15,&spurious_interrupt_bug);
        set_trap_gate(16,&coprocessor_error);
        set_trap_gate(17,&alignment_check);
#ifdef CONFIG_X86_MCE
        set_trap_gate(18,&machine_check);
#endif
        set_trap_gate(19,&simd_coprocessor_error);

        if (cpu_has_fxsr) {
                /*
                 * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
                 * Generates a compile-time "error: zero width for bit-field" if
                 * the alignment is wrong.
                 */
                struct fxsrAlignAssert {
                        int _:!(offsetof(struct task_struct,
                                        thread.i387.fxsave) & 15);
                };

                printk(KERN_INFO "Enabling fast FPU save and restore... ");
                set_in_cr4(X86_CR4_OSFXSR);
                printk("done.\n");
        }
        if (cpu_has_xmm) {
                printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
                                "support... ");
                set_in_cr4(X86_CR4_OSXMMEXCPT);
                printk("done.\n");
        }

        set_system_gate(SYSCALL_VECTOR,&system_call);

        /* Reserve all the builtin and the syscall vector. */
        for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
                set_bit(i, used_vectors);
        set_bit(SYSCALL_VECTOR, used_vectors);

        /*
         * Should be a barrier for any external CPU state.
         */
        cpu_init();

        trap_init_hook();
}
static int __init kstack_setup(char *s)
{
        kstack_depth_to_print = simple_strtoul(s, NULL, 0);
        return 1;
}
__setup("kstack=", kstack_setup);
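
/* "code_bytes=N" bounds how many opcode bytes show_registers() dumps;
   values above 8192 are clamped. */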
static int __init code_bytes_setup(char *s)
{
        code_bytes = simple_strtoul(s, NULL, 0);
        if (code_bytes > 8192)
                code_bytes = 8192;
        return 1;
}
__setup("code_bytes=", code_bytes_setup);