[PATCH] x86_64: actively synchronize vmalloc area when registering certain callbacks
[deliverable/linux.git] arch/x86_64/kernel/traps.c
/*
 *  linux/arch/x86-64/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *  Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *  $Id: traps.c,v 1.36 2002/03/24 11:09:10 ak Exp $
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/nmi.h>
#include <linux/kprobes.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/kdebug.h>
#include <asm/processor.h>

#include <asm/smp.h>
#include <asm/pgalloc.h>
#include <asm/pda.h>
#include <asm/proto.h>
#include <asm/nmi.h>

extern struct gate_struct idt_table[256];

asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void reserved(void);
asmlinkage void alignment_check(void);
asmlinkage void machine_check(void);
asmlinkage void spurious_interrupt_bug(void);

struct notifier_block *die_chain;
static DEFINE_SPINLOCK(die_notifier_lock);

int register_die_notifier(struct notifier_block *nb)
{
        int err = 0;
        unsigned long flags;

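        /*
         * Editorial note (per the patch subject): the notifier may be
         * invoked from the page fault handler itself, and the callback may
         * live in a vmalloc'ed module mapping.  Pre-faulting the vmalloc
         * area into every page table here ensures that calling the
         * notifier cannot itself raise a vmalloc fault.
         */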
        vmalloc_sync_all();
        spin_lock_irqsave(&die_notifier_lock, flags);
        err = notifier_chain_register(&die_chain, nb);
        spin_unlock_irqrestore(&die_notifier_lock, flags);
        return err;
}
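
/*
 * Illustrative sketch (not part of the original file): how a client such
 * as a crash-dump or debugger module would typically hook the die chain.
 * The handler and notifier names are hypothetical, and the die_args layout
 * assumed is the 2.6-era one from <asm/kdebug.h>.  The callback runs in
 * exception context, so it must not sleep.
 */
#if 0
static int example_die_handler(struct notifier_block *self,
                               unsigned long val, void *data)
{
        struct die_args *args = data;

        if (val == DIE_OOPS)
                printk(KERN_INFO "example: oops at rip %016lx\n",
                       args->regs->rip);
        return NOTIFY_DONE;
}

static struct notifier_block example_die_nb = {
        .notifier_call = example_die_handler,
};

/* e.g. from a module init function: register_die_notifier(&example_die_nb); */
#endif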

static inline void conditional_sti(struct pt_regs *regs)
{
        if (regs->eflags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
        preempt_disable();
        if (regs->eflags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
        if (regs->eflags & X86_EFLAGS_IF)
                local_irq_disable();
        preempt_enable_no_resched();
}

static int kstack_depth_to_print = 10;

#ifdef CONFIG_KALLSYMS
#include <linux/kallsyms.h>
int printk_address(unsigned long address)
{
        unsigned long offset = 0, symsize;
        const char *symname;
        char *modname;
        char *delim = ":";
        char namebuf[128];

        symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf);
        if (!symname)
                return printk("[<%016lx>]", address);
        if (!modname)
                modname = delim = "";
        return printk("<%016lx>{%s%s%s%s%+ld}",
                      address, delim, modname, delim, symname, offset);
}
#else
int printk_address(unsigned long address)
{
        return printk("[<%016lx>]", address);
}
#endif

static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
                                         unsigned *usedp, const char **idp)
{
        static char ids[][8] = {
                [DEBUG_STACK - 1] = "#DB",
                [NMI_STACK - 1] = "NMI",
                [DOUBLEFAULT_STACK - 1] = "#DF",
                [STACKFAULT_STACK - 1] = "#SS",
                [MCE_STACK - 1] = "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
                [N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
#endif
        };
        unsigned k;

        for (k = 0; k < N_EXCEPTION_STACKS; k++) {
                unsigned long end;

                switch (k + 1) {
#if DEBUG_STKSZ > EXCEPTION_STKSZ
                case DEBUG_STACK:
                        end = cpu_pda(cpu)->debugstack + DEBUG_STKSZ;
                        break;
#endif
                default:
                        end = per_cpu(init_tss, cpu).ist[k];
                        break;
                }
                if (stack >= end)
                        continue;
                if (stack >= end - EXCEPTION_STKSZ) {
                        if (*usedp & (1U << k))
                                break;
                        *usedp |= 1U << k;
                        *idp = ids[k];
                        return (unsigned long *)end;
                }
#if DEBUG_STKSZ > EXCEPTION_STKSZ
                if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
                        unsigned j = N_EXCEPTION_STACKS - 1;

                        do {
                                ++j;
                                end -= EXCEPTION_STKSZ;
                                ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
                        } while (stack < end - EXCEPTION_STKSZ);
                        if (*usedp & (1U << j))
                                break;
                        *usedp |= 1U << j;
                        *idp = ids[j];
                        return (unsigned long *)end;
                }
#endif
        }
        return NULL;
}

/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */

void show_trace(unsigned long *stack)
{
        const unsigned cpu = safe_smp_processor_id();
        unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
        int i;
        unsigned used = 0;

        printk("\nCall Trace:");

#define HANDLE_STACK(cond) \
        do while (cond) { \
                unsigned long addr = *stack++; \
                if (kernel_text_address(addr)) { \
                        if (i > 50) { \
                                printk("\n "); \
                                i = 0; \
                        } \
                        else \
                                i += printk(" "); \
                        /* \
                         * If the address is either in the text segment of the \
                         * kernel, or in the region which contains vmalloc'ed \
                         * memory, it *may* be the address of a calling \
                         * routine; if so, print it so that someone tracing \
                         * down the cause of the crash will be able to figure \
                         * out the call path that was taken. \
                         */ \
                        i += printk_address(addr); \
                } \
        } while (0)

        for (i = 11; ; ) {
                const char *id;
                unsigned long *estack_end;
                estack_end = in_exception_stack(cpu, (unsigned long)stack,
                                                &used, &id);

                if (estack_end) {
                        i += printk(" <%s>", id);
                        HANDLE_STACK (stack < estack_end);
                        i += printk(" <EOE>");
                        stack = (unsigned long *) estack_end[-2];
                        continue;
                }
                if (irqstack_end) {
                        unsigned long *irqstack;
                        irqstack = irqstack_end -
                                (IRQSTACKSIZE - 64) / sizeof(*irqstack);

                        if (stack >= irqstack && stack < irqstack_end) {
                                i += printk(" <IRQ>");
                                HANDLE_STACK (stack < irqstack_end);
                                stack = (unsigned long *) (irqstack_end[-1]);
                                irqstack_end = NULL;
                                i += printk(" <EOI>");
                                continue;
                        }
                }
                break;
        }

        HANDLE_STACK (((long) stack & (THREAD_SIZE-1)) != 0);
#undef HANDLE_STACK
        printk("\n");
}

void show_stack(struct task_struct *tsk, unsigned long *rsp)
{
        unsigned long *stack;
        int i;
        const int cpu = safe_smp_processor_id();
        unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
        unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);

        /*
         * debugging aid: "show_stack(NULL, NULL);" prints the
         * back trace for this cpu.
         */
        if (rsp == NULL) {
                if (tsk)
                        rsp = (unsigned long *)tsk->thread.rsp;
                else
                        rsp = (unsigned long *)&rsp;
        }

        stack = rsp;
        for (i = 0; i < kstack_depth_to_print; i++) {
                if (stack >= irqstack && stack <= irqstack_end) {
                        if (stack == irqstack_end) {
                                stack = (unsigned long *) (irqstack_end[-1]);
                                printk(" <EOI> ");
                        }
                } else {
                        if (((long) stack & (THREAD_SIZE-1)) == 0)
                                break;
                }
                if (i && ((i % 4) == 0))
                        printk("\n ");
                printk("%016lx ", *stack++);
                touch_nmi_watchdog();
        }
        show_trace((unsigned long *)rsp);
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        unsigned long dummy;
        show_trace(&dummy);
}

EXPORT_SYMBOL(dump_stack);

void show_registers(struct pt_regs *regs)
{
        int i;
        int in_kernel = !user_mode(regs);
        unsigned long rsp;
        const int cpu = safe_smp_processor_id();
        struct task_struct *cur = cpu_pda(cpu)->pcurrent;

        rsp = regs->rsp;

        printk("CPU %d ", cpu);
        __show_regs(regs);
        printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
               cur->comm, cur->pid, task_thread_info(cur), cur);

        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         */
        if (in_kernel) {

                printk("Stack: ");
                show_stack(NULL, (unsigned long *)rsp);

                printk("\nCode: ");
                if (regs->rip < PAGE_OFFSET)
                        goto bad;

                for (i = 0; i < 20; i++) {
                        unsigned char c;
                        if (__get_user(c, &((unsigned char *)regs->rip)[i])) {
bad:
                                printk(" Bad RIP value.");
                                break;
                        }
                        printk("%02x ", c);
                }
        }
        printk("\n");
}

void handle_BUG(struct pt_regs *regs)
{
        struct bug_frame f;
        long len;
        const char *prefix = "";

        if (user_mode(regs))
                return;
        if (__copy_from_user(&f, (const void __user *) regs->rip,
                             sizeof(struct bug_frame)))
                return;
        if (f.filename >= 0 ||
            f.ud2[0] != 0x0f || f.ud2[1] != 0x0b)
                return;
        len = __strnlen_user((char *)(long)f.filename, PATH_MAX) - 1;
        if (len < 0 || len >= PATH_MAX)
                f.filename = (int)(long)"unmapped filename";
        else if (len > 50) {
                f.filename += len - 50;
                prefix = "...";
        }
        printk("----------- [cut here ] --------- [please bite here ] ---------\n");
        printk(KERN_ALERT "Kernel BUG at %s%.50s:%d\n", prefix, (char *)(long)f.filename, f.line);
}

#ifdef CONFIG_BUG
void out_of_line_bug(void)
{
        BUG();
}
#endif

static DEFINE_SPINLOCK(die_lock);
static int die_owner = -1;

unsigned __kprobes long oops_begin(void)
{
        int cpu = safe_smp_processor_id();
        unsigned long flags;

        /* racy, but better than risking deadlock. */
        local_irq_save(flags);
        if (!spin_trylock(&die_lock)) {
                if (cpu == die_owner)
                        /* nested oops. should stop eventually */;
                else
                        spin_lock(&die_lock);
        }
        die_owner = cpu;
        console_verbose();
        bust_spinlocks(1);
        return flags;
}

void __kprobes oops_end(unsigned long flags)
{
        die_owner = -1;
        bust_spinlocks(0);
        spin_unlock_irqrestore(&die_lock, flags);
        if (panic_on_oops)
                panic("Oops");
}

void __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
        static int die_counter;
        printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
        printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
        printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
        printk("DEBUG_PAGEALLOC");
#endif
        printk("\n");
        notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
        show_registers(regs);
        /* Executive summary in case the oops scrolled away */
        printk(KERN_ALERT "RIP ");
        printk_address(regs->rip);
        printk(" RSP <%016lx>\n", regs->rsp);
}

void die(const char *str, struct pt_regs *regs, long err)
{
        unsigned long flags = oops_begin();

        handle_BUG(regs);
        __die(str, regs, err);
        oops_end(flags);
        do_exit(SIGSEGV);
}

void __kprobes die_nmi(char *str, struct pt_regs *regs)
{
        unsigned long flags = oops_begin();

        /*
         * We are in trouble anyway, let's at least try
         * to get a message out.
         */
        printk(str, safe_smp_processor_id());
        show_registers(regs);
        if (panic_on_timeout || panic_on_oops)
                panic("nmi watchdog");
        printk("console shuts up ...\n");
        oops_end(flags);
        do_exit(SIGSEGV);
}

static void __kprobes do_trap(int trapnr, int signr, char *str,
                              struct pt_regs *regs, long error_code,
                              siginfo_t *info)
{
        struct task_struct *tsk = current;

        conditional_sti(regs);

        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = trapnr;

        if (user_mode(regs)) {
                if (exception_trace && unhandled_signal(tsk, signr))
                        printk(KERN_INFO
                               "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
                               tsk->comm, tsk->pid, str,
                               regs->rip, regs->rsp, error_code);

                if (info)
                        force_sig_info(signr, info, tsk);
                else
                        force_sig(signr, tsk);
                return;
        }

        /* kernel trap */
        {
                const struct exception_table_entry *fixup;
                fixup = search_exception_tables(regs->rip);
                if (fixup) {
                        regs->rip = fixup->fixup;
                } else
                        die(str, regs, error_code);
                return;
        }
}

#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
            == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
            == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, regs, error_code, &info); \
}

DO_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->rip)
DO_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->rip)
DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR(18, SIGSEGV, "reserved", reserved)
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
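
/*
 * For reference (not in the original file): the overflow entry above
 * expands to roughly the handler below, so each DO_ERROR use emits one
 * trap handler that funnels into do_trap() unless a die-chain callback
 * claims the event first.
 */
#if 0
asmlinkage void do_overflow(struct pt_regs *regs, long error_code)
{
        if (notify_die(DIE_TRAP, "overflow", regs, error_code, 4, SIGSEGV)
            == NOTIFY_STOP)
                return;
        do_trap(4, SIGSEGV, "overflow", regs, error_code, NULL);
}
#endif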

asmlinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
        static const char str[] = "double fault";
        struct task_struct *tsk = current;

        /* Return value not checked because a double fault cannot be ignored */
        notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);

        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 8;

        /* This is always a kernel trap and never fixable (and thus must
           never return). */
        for (;;)
                die(str, regs, error_code);
}

asmlinkage void __kprobes do_general_protection(struct pt_regs *regs,
                                                long error_code)
{
        struct task_struct *tsk = current;

        conditional_sti(regs);

        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 13;

        if (user_mode(regs)) {
                if (exception_trace && unhandled_signal(tsk, SIGSEGV))
                        printk(KERN_INFO
                               "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
                               tsk->comm, tsk->pid,
                               regs->rip, regs->rsp, error_code);

                force_sig(SIGSEGV, tsk);
                return;
        }

        /* kernel gp */
        {
                const struct exception_table_entry *fixup;
                fixup = search_exception_tables(regs->rip);
                if (fixup) {
                        regs->rip = fixup->fixup;
                        return;
                }
                if (notify_die(DIE_GPF, "general protection fault", regs,
                               error_code, 13, SIGSEGV) == NOTIFY_STOP)
                        return;
                die("general protection fault", regs, error_code);
        }
}

static __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs *regs)
{
        printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
        printk("You probably have a hardware problem with your RAM chips\n");

        /* Clear and disable the memory parity error line. */
        reason = (reason & 0xf) | 4;
        outb(reason, 0x61);
}

static __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
        printk("NMI: IOCK error (debug interrupt?)\n");
        show_registers(regs);

        /* Re-enable the IOCK line, wait for a few seconds */
        reason = (reason & 0xf) | 8;
        outb(reason, 0x61);
        mdelay(2000);
        reason &= ~8;
        outb(reason, 0x61);
}

static __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
        printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
        printk("Dazed and confused, but trying to continue\n");
        printk("Do you have a strange power saving mode enabled?\n");
}

/* Runs on IST stack. This code must keep interrupts off all the time.
   Nested NMIs are prevented by the CPU. */
asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
{
        unsigned char reason = 0;
        int cpu;

        cpu = smp_processor_id();

        /* Only the BSP gets external NMIs from the system. */
        if (!cpu)
                reason = get_nmi_reason();

        if (!(reason & 0xc0)) {
                if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
                    == NOTIFY_STOP)
                        return;
#ifdef CONFIG_X86_LOCAL_APIC
                /*
                 * Ok, so this is none of the documented NMI sources,
                 * so it must be the NMI watchdog.
                 */
                if (nmi_watchdog > 0) {
                        nmi_watchdog_tick(regs, reason);
                        return;
                }
#endif
                unknown_nmi_error(reason, regs);
                return;
        }
        if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
                return;

        /* AK: following checks seem to be broken on modern chipsets. FIXME */

        if (reason & 0x80)
                mem_parity_error(reason, regs);
        if (reason & 0x40)
                io_check_error(reason, regs);
}

/* runs on IST stack. */
asmlinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
        if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
                return;
        }
        do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
        return;
}

/* Help handler running on IST stack to switch back to user stack
   for scheduling or signal handling. The actual stack switch is done in
   entry.S */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
        struct pt_regs *regs = eregs;
        /* Already synced */
        if (eregs == (struct pt_regs *)eregs->rsp)
                ;
        /* Exception from user space */
        else if (user_mode(eregs))
                regs = task_pt_regs(current);
        /* Exception from kernel and interrupts are enabled. Move to
           kernel process stack. */
        else if (eregs->eflags & X86_EFLAGS_IF)
                regs = (struct pt_regs *)(eregs->rsp -= sizeof(struct pt_regs));
        if (eregs != regs)
                *regs = *eregs;
        return regs;
}

/* runs on IST stack. */
asmlinkage void __kprobes do_debug(struct pt_regs *regs,
                                   unsigned long error_code)
{
        unsigned long condition;
        struct task_struct *tsk = current;
        siginfo_t info;

        get_debugreg(condition, 6);

        if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
                       SIGTRAP) == NOTIFY_STOP)
                return;

        preempt_conditional_sti(regs);

        /* Mask out spurious debug traps due to lazy DR7 setting */
        if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
                if (!tsk->thread.debugreg7)
                        goto clear_dr7;
        }

        tsk->thread.debugreg6 = condition;

        /* Mask out spurious TF errors due to lazy TF clearing */
        if (condition & DR_STEP) {
                /*
                 * The TF error should be masked out only if the current
                 * process is not traced and if the TRAP flag has been set
                 * previously by a tracing process (condition detected by
                 * the PT_DTRACE flag); remember that the i386 TRAP flag
                 * can be modified by the process itself in user mode,
                 * allowing programs to debug themselves without the ptrace()
                 * interface.
                 */
                if (!user_mode(regs))
                        goto clear_TF_reenable;
                /*
                 * Was the TF flag set by a debugger? If so, clear it now,
                 * so that register information is correct.
                 */
                if (tsk->ptrace & PT_DTRACE) {
                        regs->eflags &= ~TF_MASK;
                        tsk->ptrace &= ~PT_DTRACE;
                }
        }

        /* Ok, finally something we can handle */
        tsk->thread.trap_no = 1;
        tsk->thread.error_code = error_code;
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_BRKPT;
        info.si_addr = user_mode(regs) ? (void __user *)regs->rip : NULL;
        force_sig_info(SIGTRAP, &info, tsk);

clear_dr7:
        set_debugreg(0UL, 7);
        preempt_conditional_cli(regs);
        return;

clear_TF_reenable:
        set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
        regs->eflags &= ~TF_MASK;
        preempt_conditional_cli(regs);
}

static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
{
        const struct exception_table_entry *fixup;
        fixup = search_exception_tables(regs->rip);
        if (fixup) {
                regs->rip = fixup->fixup;
                return 1;
        }
        notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
        /* Illegal floating point operation in the kernel */
        current->thread.trap_no = trapnr;
        die(str, regs, 0);
        return 0;
}

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
asmlinkage void do_coprocessor_error(struct pt_regs *regs)
{
        void __user *rip = (void __user *)(regs->rip);
        struct task_struct *task;
        siginfo_t info;
        unsigned short cwd, swd;

        conditional_sti(regs);
        if (!user_mode(regs) &&
            kernel_math_error(regs, "kernel x87 math error", 16))
                return;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 16;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = rip;
        /*
         * (~cwd & swd) will mask out exceptions that are not set to unmasked
         * status. 0x3f is the exception bits in these regs, 0x200 is the
         * C1 reg you need in case of a stack fault, 0x040 is the stack
         * fault bit. We should only be taking one exception at a time,
         * so if this combination doesn't produce any single exception,
         * then we have a bad program that isn't synchronizing its FPU usage
         * and it will suffer the consequences since we won't be able to
         * fully reproduce the context of the exception.
         */
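        /*
         * Worked example (editorial, not in the original): with a control
         * word of 0x037b only the zero-divide mask bit (0x004) is clear,
         * so a zero divide sets status bit 0x004 and
         * (swd & ~cwd & 0x3f) == 0x004, selecting FPE_FLTDIV below.
         */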
        cwd = get_fpu_cwd(task);
        swd = get_fpu_swd(task);
        switch (swd & ~cwd & 0x3f) {
        case 0x000:
        default:
                break;
        case 0x001: /* Invalid Op */
                /*
                 * swd & 0x240 == 0x040: Stack Underflow
                 * swd & 0x240 == 0x240: Stack Overflow
                 * User must clear the SF bit (0x40) if set
                 */
                info.si_code = FPE_FLTINV;
                break;
        case 0x002: /* Denormalize */
        case 0x010: /* Underflow */
                info.si_code = FPE_FLTUND;
                break;
        case 0x004: /* Zero Divide */
                info.si_code = FPE_FLTDIV;
                break;
        case 0x008: /* Overflow */
                info.si_code = FPE_FLTOVF;
                break;
        case 0x020: /* Precision */
                info.si_code = FPE_FLTRES;
                break;
        }
        force_sig_info(SIGFPE, &info, task);
}

asmlinkage void bad_intr(void)
{
        printk("bad interrupt");
}

asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
{
        void __user *rip = (void __user *)(regs->rip);
        struct task_struct *task;
        siginfo_t info;
        unsigned short mxcsr;

        conditional_sti(regs);
        if (!user_mode(regs) &&
            kernel_math_error(regs, "kernel simd math error", 19))
                return;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 19;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = rip;
        /*
         * The SIMD FPU exceptions are handled a little differently, as there
         * is only a single status/control register. Thus, to determine which
         * unmasked exception was caught we must mask the exception mask bits
         * at 0x1f80, and then use these to mask the exception bits at 0x3f.
         */
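        /*
         * Worked example (editorial, not in the original): with
         * mxcsr == 0x1d84 the zero-divide mask ZM (0x0200) is clear and
         * the ZE flag (0x0004) is set; ((mxcsr & 0x1f80) >> 7) == 0x3b,
         * so the expression below yields 0x004 and we report FPE_FLTDIV.
         */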
        mxcsr = get_fpu_mxcsr(task);
        switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
        case 0x000:
        default:
                break;
        case 0x001: /* Invalid Op */
                info.si_code = FPE_FLTINV;
                break;
        case 0x002: /* Denormalize */
        case 0x010: /* Underflow */
                info.si_code = FPE_FLTUND;
                break;
        case 0x004: /* Zero Divide */
                info.si_code = FPE_FLTDIV;
                break;
        case 0x008: /* Overflow */
                info.si_code = FPE_FLTOVF;
                break;
        case 0x020: /* Precision */
                info.si_code = FPE_FLTRES;
                break;
        }
        force_sig_info(SIGFPE, &info, task);
}

asmlinkage void do_spurious_interrupt_bug(struct pt_regs *regs)
{
}

asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
{
}

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 */
asmlinkage void math_state_restore(void)
{
        struct task_struct *me = current;
        clts();                 /* Allow maths ops (or we recurse) */

        if (!used_math())
                init_fpu(me);
        restore_fpu_checking(&me->thread.i387.fxsave);
        task_thread_info(me)->status |= TS_USEDFPU;
}

void __init trap_init(void)
{
        set_intr_gate(0, &divide_error);
        set_intr_gate_ist(1, &debug, DEBUG_STACK);
        set_intr_gate_ist(2, &nmi, NMI_STACK);
        set_system_gate_ist(3, &int3, DEBUG_STACK); /* int3 can be called from all */
        set_system_gate(4, &overflow);              /* int4 can be called from all */
        set_intr_gate(5, &bounds);
        set_intr_gate(6, &invalid_op);
        set_intr_gate(7, &device_not_available);
        set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
        set_intr_gate(9, &coprocessor_segment_overrun);
        set_intr_gate(10, &invalid_TSS);
        set_intr_gate(11, &segment_not_present);
        set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
        set_intr_gate(13, &general_protection);
        set_intr_gate(14, &page_fault);
        set_intr_gate(15, &spurious_interrupt_bug);
        set_intr_gate(16, &coprocessor_error);
        set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
        set_intr_gate_ist(18, &machine_check, MCE_STACK);
#endif
        set_intr_gate(19, &simd_coprocessor_error);

#ifdef CONFIG_IA32_EMULATION
        set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif

        /*
         * Should be a barrier for any external CPU state.
         */
        cpu_init();
}

/* Actual parsing is done early in setup.c. */
static int __init oops_dummy(char *s)
{
        panic_on_oops = 1;
        return -1;
}
__setup("oops=", oops_dummy);

static int __init kstack_setup(char *s)
{
        kstack_depth_to_print = simple_strtoul(s, NULL, 0);
        return 0;
}
__setup("kstack=", kstack_setup);