traps: x86: various noop-changes preparing for unification of traps_xx.c
arch/x86/kernel/traps_64.c

/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/unwind.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/kmemcheck.h>
#include <asm/debugreg.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/unwind.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/i387.h>

#include <mach_traps.h>

#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/pda.h>

static int ignore_nmis;

static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	inc_preempt_count();
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
	dec_preempt_count();
}
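
/*
 * Note (added for clarity, not in the original file): handlers that run
 * on an IST stack, such as do_stack_segment(), do_int3() and do_debug()
 * below, use preempt_conditional_sti()/preempt_conditional_cli() rather
 * than plain conditional_sti().  Raising the preempt count before
 * interrupts may be enabled keeps the handler from being preempted
 * while it is still running on the per-CPU IST stack.
 */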

static void __kprobes
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

	if (!user_mode(regs))
		goto kernel_trap;

	/*
	 * We want error_code and trap_no set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		printk(KERN_INFO
		       "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
		       tsk->comm, tsk->pid, str,
		       regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		printk("\n");
	}

	if (info)
		force_sig_info(signr, info, tsk);
	else
		force_sig(signr, tsk);
	return;

kernel_trap:
	if (!fixup_exception(regs)) {
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;
		die(str, regs, error_code);
	}
	return;
}

#define DO_ERROR(trapnr, signr, str, name)				\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
			== NOTIFY_STOP)					\
		return;							\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, NULL);		\
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr)	\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	siginfo_t info;							\
	info.si_signo = signr;						\
	info.si_errno = 0;						\
	info.si_code = sicode;						\
	info.si_addr = (void __user *)siaddr;				\
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
			== NOTIFY_STOP)					\
		return;							\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, &info);		\
}
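
/*
 * Illustrative only (not part of the original file): each invocation
 * below stamps out a small handler.  For example,
 * DO_ERROR(4, SIGSEGV, "overflow", overflow) expands to roughly:
 *
 *	dotraplinkage void do_overflow(struct pt_regs *regs, long error_code)
 *	{
 *		if (notify_die(DIE_TRAP, "overflow", regs, error_code,
 *				4, SIGSEGV) == NOTIFY_STOP)
 *			return;
 *		conditional_sti(regs);
 *		do_trap(4, SIGSEGV, "overflow", regs, error_code, NULL);
 *	}
 */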
DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
DO_ERROR(4, SIGSEGV, "overflow", overflow)
DO_ERROR(5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)

/* Runs on IST stack */
dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
			12, SIGBUS) == NOTIFY_STOP)
		return;
	preempt_conditional_sti(regs);
	do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}

dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

	/* Return value not checked because a double fault cannot be ignored */
	notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 8;

	/*
	 * This is always a kernel trap and never fixable (and thus must
	 * never return).
	 */
	for (;;)
		die(str, regs, error_code);
}

dotraplinkage void __kprobes
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;

	conditional_sti(regs);

	tsk = current;
	if (!user_mode(regs))
		goto gp_in_kernel;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
	    printk_ratelimit()) {
		printk(KERN_INFO
		       "%s[%d] general protection ip:%lx sp:%lx error:%lx",
		       tsk->comm, task_pid_nr(tsk),
		       regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		printk("\n");
	}

	force_sig(SIGSEGV, tsk);
	return;

gp_in_kernel:
	if (fixup_exception(regs))
		return;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;
	if (notify_die(DIE_GPF, "general protection fault", regs,
			error_code, 13, SIGSEGV) == NOTIFY_STOP)
		return;
	die("general protection fault", regs, error_code);
}

static notrace __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs *regs)
{
	printk(KERN_EMERG
		"Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		reason, smp_processor_id());

	printk(KERN_EMERG
		"You have some hardware problem, likely on the PCI bus.\n");

#if defined(CONFIG_EDAC)
	if (edac_handler_set()) {
		edac_atomic_assert_error();
		return;
	}
#endif

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");

	/* Clear and disable the memory parity error line. */
	reason = (reason & 0xf) | 4;
	outb(reason, 0x61);
}

static notrace __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	unsigned long i;

	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
	show_registers(regs);

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & 0xf) | 8;
	outb(reason, 0x61);

	i = 2000;
	while (--i)
		udelay(1000);

	reason &= ~8;
	outb(reason, 0x61);
}
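
/*
 * Background note (added for clarity, not in the original file): the
 * two handlers above talk to the PC/AT NMI status/control port 0x61.
 * On read, bit 7 reports a memory parity/SERR error and bit 6 an I/O
 * channel check (IOCK); on write, setting bit 2 or bit 3 clears and
 * disables the respective source.  That is why they write back
 * (reason & 0xf) | 4 or | 8, and why io_check_error() clears bit 3
 * again afterwards to re-arm IOCK.
 */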

static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
			NOTIFY_STOP)
		return;
	printk(KERN_EMERG
		"Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		reason, smp_processor_id());

	printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
}

static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int cpu;

	cpu = smp_processor_id();

	/* Only the BSP gets external NMIs from the system. */
	if (!cpu)
		reason = get_nmi_reason();

	if (!(reason & 0xc0)) {
		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
				== NOTIFY_STOP)
			return;
		/*
		 * Ok, so this is none of the documented NMI sources,
		 * so it must be the NMI watchdog.
		 */
		if (nmi_watchdog_tick(regs, reason))
			return;
		if (!do_nmi_callback(regs, cpu))
			unknown_nmi_error(reason, regs);

		return;
	}
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
		return;

	/* AK: following checks seem to be broken on modern chipsets. FIXME */
	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
		io_check_error(reason, regs);
}

dotraplinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
	nmi_enter();

	add_pda(__nmi_count, 1);

	if (!ignore_nmis)
		default_do_nmi(regs);

	nmi_exit();
}

void stop_nmi(void)
{
	acpi_nmi_disable();
	ignore_nmis++;
}

void restart_nmi(void)
{
	ignore_nmis--;
	acpi_nmi_enable();
}

/* May run on IST stack. */
dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return;

	preempt_conditional_sti(regs);
	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}

/*
 * Help handlers running on an IST stack switch back to the task stack
 * for scheduling or signal handling.  The actual stack switch is done
 * in entry.S.
 */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = eregs;
	/* Did already sync */
	if (eregs == (struct pt_regs *)eregs->sp)
		;
	/* Exception from user space */
	else if (user_mode(eregs))
		regs = task_pt_regs(current);
	/*
	 * Exception from kernel and interrupts are enabled.  Move to
	 * kernel process stack.
	 */
	else if (eregs->flags & X86_EFLAGS_IF)
		regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
	if (eregs != regs)
		*regs = *eregs;
	return regs;
}

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel.  Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses.  Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space.  Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	unsigned long condition;
	int si_code;

	get_debugreg(condition, 6);

	/*
	 * The processor cleared BTF, so don't mark that we need it set.
	 */
	clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
	tsk->thread.debugctlmsr = 0;

	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
			SIGTRAP) == NOTIFY_STOP)
		return;

	/* It's safe to allow irq's after DR6 has been saved */
	preempt_conditional_sti(regs);

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg7)
			goto clear_dr7;
	}

	/* Save debug status register where ptrace can see it */
	tsk->thread.debugreg6 = condition;

	/*
	 * Single-stepping through TF: make sure we ignore any events in
	 * kernel space (but re-enable TF when returning to user mode).
	 */
	if (condition & DR_STEP) {
		if (!user_mode(regs))
			goto clear_TF_reenable;
	}

	si_code = get_si_code(condition);
	/* Ok, finally something we can handle */
	send_sigtrap(tsk, regs, error_code, si_code);

	/*
	 * Disable additional traps.  They'll be re-enabled when
	 * the signal is delivered.
	 */
clear_dr7:
	set_debugreg(0, 7);
	preempt_conditional_cli(regs);
	return;

clear_TF_reenable:
	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
	regs->flags &= ~X86_EFLAGS_TF;
	preempt_conditional_cli(regs);
	return;
}

static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
{
	if (fixup_exception(regs))
		return 1;

	notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
	/* Illegal floating point operation in the kernel */
	current->thread.trap_no = trapnr;
	die(str, regs, 0);
	return 0;
}

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void __user *ip)
{
	struct task_struct *task;
	siginfo_t info;
	unsigned short cwd, swd;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 16;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = ip;
	/*
	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
	 * status.  0x3f is the exception bits in these regs, 0x200 is the
	 * C1 reg you need in case of a stack fault, 0x040 is the stack
	 * fault bit.  We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception.
	 */
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);
	switch (swd & ~cwd & 0x3f) {
	case 0x000: /* No unmasked exception */
	default:    /* Multiple exceptions */
		break;
	case 0x001: /* Invalid Op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}
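
/*
 * Worked example (added for illustration, not in the original file):
 * the default x87 control word is 0x037f, i.e. all exceptions masked.
 * If a program clears the zero-divide mask (ZM, bit 2), cwd = 0x037b,
 * and an FDIV by zero then sets the ZE status bit (0x004) in swd.
 * swd & ~cwd & 0x3f == 0x004, so the switch above selects FPE_FLTDIV.
 */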

dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);
	if (!user_mode(regs) &&
	    kernel_math_error(regs, "kernel x87 math error", 16))
		return;
	math_error((void __user *)regs->ip);
}

static void simd_math_error(void __user *ip)
{
	struct task_struct *task;
	siginfo_t info;
	unsigned short mxcsr;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 19;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = ip;
	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register.  Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
	mxcsr = get_fpu_mxcsr(task);
	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
	case 0x000:
	default:
		break;
	case 0x001: /* Invalid Op */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}
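
/*
 * Worked example (added for illustration, not in the original file):
 * MXCSR keeps the mask bits at bits 7-12 and the status flags at bits
 * 0-5 in the same order, so shifting the masks right by 7 lines them
 * up with the flags.  Starting from the default MXCSR of 0x1f80 with
 * the zero-divide mask (bit 9) cleared, a SIMD divide by zero sets ZE
 * (bit 2), giving mxcsr = 0x1d84; the switch above then computes
 * ~(0x1d80 >> 7) & 0x04 == 0x004, i.e. FPE_FLTDIV.
 */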

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);
	if (!user_mode(regs) &&
	    kernel_math_error(regs, "kernel simd math error", 19))
		return;
	simd_math_error((void __user *)regs->ip);
}

dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
}

asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
{
}

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 */
asmlinkage void math_state_restore(void)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = thread->task;

	if (!tsk_used_math(tsk)) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (init_fpu(tsk)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	clts();				/* Allow maths ops (or we recurse) */
	/*
	 * Paranoid restore.  Send a SIGSEGV if we fail to restore the state.
	 */
	if (unlikely(restore_fpu_checking(tsk))) {
		stts();
		force_sig(SIGSEGV, tsk);
		return;
	}
	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
	tsk->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);

dotraplinkage void __kprobes
do_device_not_available(struct pt_regs *regs, long error)
{
	math_state_restore();
}
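
/*
 * Flow note (added for clarity, not in the original file): on a context
 * switch away from an FPU-using task the kernel sets CR0.TS, so the
 * next FPU instruction raises #NM (vector 7, "device not available").
 * do_device_not_available() then calls math_state_restore(), which
 * clears TS with clts() and reloads (or lazily allocates) the task's
 * FPU state, after which the faulting instruction is restarted.
 */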

void __init trap_init(void)
{
	set_intr_gate(0, &divide_error);
	set_intr_gate_ist(1, &debug, DEBUG_STACK);
	set_intr_gate_ist(2, &nmi, NMI_STACK);
	/* int3 can be called from all */
	set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
	/* int4 can be called from all */
	set_system_intr_gate(4, &overflow);
	set_intr_gate(5, &bounds);
	set_intr_gate(6, &invalid_op);
	set_intr_gate(7, &device_not_available);
	set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
	set_intr_gate(9, &coprocessor_segment_overrun);
	set_intr_gate(10, &invalid_TSS);
	set_intr_gate(11, &segment_not_present);
	set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
	set_intr_gate(13, &general_protection);
	set_intr_gate(14, &page_fault);
	set_intr_gate(15, &spurious_interrupt_bug);
	set_intr_gate(16, &coprocessor_error);
	set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(18, &machine_check, MCE_STACK);
#endif
	set_intr_gate(19, &simd_coprocessor_error);

#ifdef CONFIG_IA32_EMULATION
	set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif
	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();
}