/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/unwind.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/processor-flags.h>
#include <asm/arch_hooks.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/unwind.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/io.h>
#include <asm/traps.h>

#include "mach_traps.h"
#include "cpu/mcheck/mce.h"

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

asmlinkage int system_call(void);

/* Do we ignore FPU interrupts ? */
char ignore_fpu_irq;

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.. We have a special link segment
 * for this.
 */
gate_desc idt_table[256]
	__attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };

static int ignore_nmis;

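/*
 * Helpers to re-enable interrupts only if the interrupted context had
 * them enabled (IF set in the saved flags).  The preempt_* variants
 * additionally raise the preempt count so the handler cannot be
 * preempted while it runs with interrupts conditionally enabled.
 */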
static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	inc_preempt_count();
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
	dec_preempt_count();
}

static inline void
die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
	if (!user_mode_vm(regs))
		die(str, regs, err);
}

/*
 * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
 * invalid offset set (the LAZY one) and the faulting thread has
 * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS,
 * we set the offset field correctly and return 1.
 */
static int lazy_iobitmap_copy(void)
{
	struct thread_struct *thread;
	struct tss_struct *tss;
	int cpu;

	cpu = get_cpu();
	tss = &per_cpu(init_tss, cpu);
	thread = &current->thread;

	if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
	    thread->io_bitmap_ptr) {
		memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
		       thread->io_bitmap_max);
		/*
		 * If the previously set map was extending to higher ports
		 * than the current one, pad extra space with 0xff (no access).
		 */
		if (thread->io_bitmap_max < tss->io_bitmap_max) {
			memset((char *) tss->io_bitmap +
				thread->io_bitmap_max, 0xff,
				tss->io_bitmap_max - thread->io_bitmap_max);
		}
		tss->io_bitmap_max = thread->io_bitmap_max;
		tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
		tss->io_bitmap_owner = thread;
		put_cpu();

		return 1;
	}
	put_cpu();

	return 0;
}

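/*
 * Common bookkeeping for most exceptions: forward vm86 traps to the
 * vm86 emulation, try an exception fixup for faults in kernel mode,
 * and otherwise hand the signal (with optional siginfo) to the
 * current task.
 */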
static void __kprobes
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

	if (regs->flags & X86_VM_MASK) {
		/*
		 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < 6)
			goto vm86_trap;
		goto trap_signal;
	}

	if (!user_mode(regs))
		goto kernel_trap;

trap_signal:
	/*
	 * We want error_code and trap_no set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up. die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults. See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

	if (info)
		force_sig_info(signr, info, tsk);
	else
		force_sig(signr, tsk);
	return;

kernel_trap:
	if (!fixup_exception(regs)) {
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;
		die(str, regs, error_code);
	}
	return;

vm86_trap:
	if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
						error_code, trapnr))
		goto trap_signal;
	return;
}

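/*
 * Generate the simple trap handlers: each one runs the die notifier
 * chain, conditionally re-enables interrupts and then falls through
 * to do_trap().  DO_ERROR_INFO additionally fills in a siginfo with
 * the given si_code and fault address.
 */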
#define DO_ERROR(trapnr, signr, str, name)				\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
							== NOTIFY_STOP)	\
		return;							\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, NULL);		\
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr)	\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	siginfo_t info;							\
	info.si_signo = signr;						\
	info.si_errno = 0;						\
	info.si_code = sicode;						\
	info.si_addr = (void __user *)siaddr;				\
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
							== NOTIFY_STOP)	\
		return;							\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, &info);		\
}

DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
DO_ERROR(4, SIGSEGV, "overflow", overflow)
DO_ERROR(5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)

dotraplinkage void __kprobes
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;

	conditional_sti(regs);

	if (lazy_iobitmap_copy()) {
		/* restart the faulting instruction */
		return;
	}

	if (regs->flags & X86_VM_MASK)
		goto gp_in_vm86;

	tsk = current;
	if (!user_mode(regs))
		goto gp_in_kernel;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
			printk_ratelimit()) {
		printk(KERN_INFO
			"%s[%d] general protection ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk),
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		printk("\n");
	}

	force_sig(SIGSEGV, tsk);
	return;

gp_in_vm86:
	local_irq_enable();
	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
	return;

gp_in_kernel:
	if (fixup_exception(regs))
		return;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;
	if (notify_die(DIE_GPF, "general protection fault", regs,
				error_code, 13, SIGSEGV) == NOTIFY_STOP)
		return;
	die("general protection fault", regs, error_code);
}

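/*
 * The NMI handling below decodes the reason byte read from system
 * control port B (0x61): bit 7 signals a memory parity/SERR error and
 * bit 6 an I/O check (IOCHK) error; anything else is treated as an
 * unknown NMI.
 */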
static notrace __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs *regs)
{
	printk(KERN_EMERG
		"Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
			reason, smp_processor_id());

	printk(KERN_EMERG
		"You have some hardware problem, likely on the PCI bus.\n");

#if defined(CONFIG_EDAC)
	if (edac_handler_set()) {
		edac_atomic_assert_error();
		return;
	}
#endif

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");

	/* Clear and disable the memory parity error line. */
	clear_mem_error(reason);
}

static notrace __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	unsigned long i;

	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
	show_registers(regs);

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & 0xf) | 8;
	outb(reason, 0x61);

	i = 2000;
	while (--i)
		udelay(1000);

	reason &= ~8;
	outb(reason, 0x61);
}

static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
		return;
#ifdef CONFIG_MCA
	/*
	 * Might actually be able to figure out what the guilty party
	 * is:
	 */
	if (MCA_bus) {
		mca_handle_nmi();
		return;
	}
#endif
	printk(KERN_EMERG
		"Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
			reason, smp_processor_id());

	printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
}

static DEFINE_SPINLOCK(nmi_print_lock);

void notrace __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
{
	if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
		return;

	spin_lock(&nmi_print_lock);
	/*
	 * We are in trouble anyway, let's at least try
	 * to get a message out:
	 */
	bust_spinlocks(1);
	printk(KERN_EMERG "%s", str);
	printk(" on CPU%d, ip %08lx, registers:\n",
		smp_processor_id(), regs->ip);
	show_registers(regs);
	if (do_panic)
		panic("Non maskable interrupt");
	console_silent();
	spin_unlock(&nmi_print_lock);
	bust_spinlocks(0);

	/*
	 * If we are in kernel we are probably nested up pretty bad
	 * and might as well get out now while we still can:
	 */
	if (!user_mode_vm(regs)) {
		current->thread.trap_no = 2;
		crash_kexec(regs);
	}

	do_exit(SIGSEGV);
}

static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int cpu;

	cpu = smp_processor_id();

	/* Only the BSP gets external NMIs from the system. */
	if (!cpu)
		reason = get_nmi_reason();

	if (!(reason & 0xc0)) {
		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
							== NOTIFY_STOP)
			return;
#ifdef CONFIG_X86_LOCAL_APIC
		/*
		 * Ok, so this is none of the documented NMI sources,
		 * so it must be the NMI watchdog.
		 */
		if (nmi_watchdog_tick(regs, reason))
			return;
		if (!do_nmi_callback(regs, cpu))
			unknown_nmi_error(reason, regs);
#else
		unknown_nmi_error(reason, regs);
#endif

		return;
	}
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
		return;

	/* AK: following checks seem to be broken on modern chipsets. FIXME */
	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
		io_check_error(reason, regs);
	/*
	 * Reassert NMI in case it became active meanwhile
	 * as it's edge-triggered:
	 */
	reassert_nmi();
}

dotraplinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
	int cpu;

	nmi_enter();

	cpu = smp_processor_id();

	++nmi_count(cpu);

	if (!ignore_nmis)
		default_do_nmi(regs);

	nmi_exit();
}

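/*
 * stop_nmi()/restart_nmi() bracket code that must not be disturbed by
 * NMIs: acpi_nmi_disable() shuts off the ACPI-managed NMI source and
 * ignore_nmis makes do_nmi() drop anything that still comes in.
 */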
void stop_nmi(void)
{
	acpi_nmi_disable();
	ignore_nmis++;
}

void restart_nmi(void)
{
	ignore_nmis--;
	acpi_nmi_enable();
}

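/*
 * Breakpoint exception (int3).  With kprobes configured the die chain
 * gets first shot at the breakpoint (DIE_INT3) before interrupts are
 * re-enabled; otherwise it is reported as an ordinary DIE_TRAP.
 */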
dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_KPROBES
	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return;
	conditional_sti(regs);
#else
	if (notify_die(DIE_TRAP, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return;
#endif

	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
}

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 */
dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	unsigned long condition;
	int si_code;

	get_debugreg(condition, 6);

	/*
	 * The processor cleared BTF, so don't mark that we need it set.
	 */
	clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
	tsk->thread.debugctlmsr = 0;

	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
						SIGTRAP) == NOTIFY_STOP)
		return;

	/* It's safe to allow irq's after DR6 has been saved */
	preempt_conditional_sti(regs);

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg7)
			goto clear_dr7;
	}

	if (regs->flags & X86_VM_MASK)
		goto debug_vm86;

	/* Save debug status register where ptrace can see it */
	tsk->thread.debugreg6 = condition;

	/*
	 * Single-stepping through TF: make sure we ignore any events in
	 * kernel space (but re-enable TF when returning to user mode).
	 */
	if (condition & DR_STEP) {
		if (!user_mode(regs))
			goto clear_TF_reenable;
	}

	si_code = get_si_code(condition);
	/* Ok, finally something we can handle */
	send_sigtrap(tsk, regs, error_code, si_code);

	/*
	 * Disable additional traps. They'll be re-enabled when
	 * the signal is delivered.
	 */
clear_dr7:
	set_debugreg(0, 7);
	preempt_conditional_cli(regs);
	return;

debug_vm86:
	handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
	preempt_conditional_cli(regs);
	return;

clear_TF_reenable:
	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
	regs->flags &= ~X86_EFLAGS_TF;
	preempt_conditional_cli(regs);
	return;
}

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void __user *ip)
{
	struct task_struct *task;
	siginfo_t info;
	unsigned short cwd, swd;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 16;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = ip;
	/*
	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
	 * status.  0x3f is the exception bits in these regs, 0x200 is the
	 * C1 reg you need in case of a stack fault, 0x040 is the stack
	 * fault bit.  We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception
	 */
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);
	switch (swd & ~cwd & 0x3f) {
	case 0x000: /* No unmasked exception */
		return;
	default: /* Multiple exceptions */
		break;
	case 0x001: /* Invalid Op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}

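/*
 * Trap 16: x87 floating-point error.  ignore_fpu_irq is set so that a
 * pending legacy FPU error interrupt (IRQ13) is not reported a second
 * time for the same fault.
 */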
dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);
	ignore_fpu_irq = 1;
	math_error((void __user *)regs->ip);
}

static void simd_math_error(void __user *ip)
{
	struct task_struct *task;
	siginfo_t info;
	unsigned short mxcsr;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 19;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = ip;
	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register.  Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
	mxcsr = get_fpu_mxcsr(task);
	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
	case 0x000:
	default:
		break;
	case 0x001: /* Invalid Op */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);

	if (cpu_has_xmm) {
		/* Handle SIMD FPU exceptions on PIII+ processors. */
		ignore_fpu_irq = 1;
		simd_math_error((void __user *)regs->ip);
		return;
	}
	/*
	 * Handle strange cache flush from user space exception
	 * in all other cases.  This is undocumented behaviour.
	 */
	if (regs->flags & X86_VM_MASK) {
		handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
		return;
	}
	current->thread.trap_no = 19;
	current->thread.error_code = error_code;
	die_if_kernel("cache flush denied", regs, error_code);
	force_sig(SIGSEGV, current);
}

dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);
#if 0
	/* No need to warn about this any longer. */
	printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}

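/*
 * Patch the per-CPU GDT_ENTRY_ESPFIX_SS descriptor so that its base and
 * limit cover the current kernel stack (THREAD_SIZE aligned), and
 * return the kernel esp re-expressed relative to that new base.
 */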
unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp)
{
	struct desc_struct *gdt = get_cpu_gdt_table(smp_processor_id());
	unsigned long base = (kesp - uesp) & -THREAD_SIZE;
	unsigned long new_kesp = kesp - base;
	unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
	__u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];

	/* Set up base for espfix segment */
	desc &= 0x00f0ff0000000000ULL;
	desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
		((((__u64)base) << 32) & 0xff00000000000000ULL) |
		((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
		(lim_pages & 0xffff);
	*(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;

	return new_kesp;
}

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(void)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = thread->task;

	if (!tsk_used_math(tsk)) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (init_fpu(tsk)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	clts();				/* Allow maths ops (or we recurse) */
	restore_fpu(tsk);
	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
	tsk->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);

#ifndef CONFIG_MATH_EMULATION

asmlinkage void math_emulate(long arg)
{
	printk(KERN_EMERG
		"math-emulation not enabled and no coprocessor found.\n");
	printk(KERN_EMERG "killing %s.\n", current->comm);
	force_sig(SIGFPE, current);
	schedule();
}

#endif /* CONFIG_MATH_EMULATION */

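/*
 * Trap 7 (#NM, device not available): if the CPU is set up for FPU
 * emulation (CR0.EM), emulate the instruction; otherwise lazily
 * restore the task's FPU state via math_state_restore().
 */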
dotraplinkage void __kprobes
do_device_not_available(struct pt_regs *regs, long error)
{
	if (read_cr0() & X86_CR0_EM) {
		conditional_sti(regs);
		math_emulate(0);
	} else {
		math_state_restore(); /* interrupts still off */
		conditional_sti(regs);
	}
}

#ifdef CONFIG_X86_MCE
dotraplinkage void __kprobes do_machine_check(struct pt_regs *regs, long error)
{
	conditional_sti(regs);
	machine_check_vector(regs, error);
}
#endif

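/*
 * "iret exception" pseudo-trap (reported as trap 32): used when the
 * iret back to user space faults; the task gets an illegal-stack
 * SIGILL.
 */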
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
	siginfo_t info;
	local_irq_enable();

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_BADSTK;
	info.si_addr = 0;
	if (notify_die(DIE_TRAP, "iret exception",
			regs, error_code, 32, SIGILL) == NOTIFY_STOP)
		return;
	do_trap(32, SIGILL, "iret exception", regs, error_code, &info);
}

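/*
 * Install the exception gates in the IDT, enable the FXSR/SSE exception
 * machinery in CR4 when the CPU supports it, hook up the system call
 * gate and reserve the corresponding vectors.
 */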
void __init trap_init(void)
{
	int i;

#ifdef CONFIG_EISA
	void __iomem *p = early_ioremap(0x0FFFD9, 4);

	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
		EISA_bus = 1;
	early_iounmap(p, 4);
#endif

	set_intr_gate(0, &divide_error);
	set_intr_gate_ist(1, &debug, DEBUG_STACK);
	set_intr_gate_ist(2, &nmi, NMI_STACK);
	/* int3 can be called from all */
	set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
	/* int4 can be called from all */
	set_system_intr_gate(4, &overflow);
	set_intr_gate(5, &bounds);
	set_intr_gate(6, &invalid_op);
	set_intr_gate(7, &device_not_available);
	set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
	set_intr_gate(9, &coprocessor_segment_overrun);
	set_intr_gate(10, &invalid_TSS);
	set_intr_gate(11, &segment_not_present);
	set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
	set_intr_gate(13, &general_protection);
	set_intr_gate(14, &page_fault);
	set_intr_gate(15, &spurious_interrupt_bug);
	set_intr_gate(16, &coprocessor_error);
	set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(18, &machine_check, MCE_STACK);
#endif
	set_intr_gate(19, &simd_coprocessor_error);

	if (cpu_has_fxsr) {
		printk(KERN_INFO "Enabling fast FPU save and restore... ");
		set_in_cr4(X86_CR4_OSFXSR);
		printk("done.\n");
	}
	if (cpu_has_xmm) {
		printk(KERN_INFO
			"Enabling unmasked SIMD FPU exception support... ");
		set_in_cr4(X86_CR4_OSXMMEXCPT);
		printk("done.\n");
	}

	set_system_trap_gate(SYSCALL_VECTOR, &system_call);

	/* Reserve all the builtin and the syscall vector: */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit(i, used_vectors);

	set_bit(SYSCALL_VECTOR, used_vectors);

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

	trap_init_hook();
}