/*
 * Architecture-specific trap handling.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 05/12/00 grao <goutham.rao@intel.com> : added isr in siginfo for SIGFPE
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/module.h>		/* for EXPORT_SYMBOL */
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/delay.h>		/* for ssleep() */
#include <linux/kdebug.h>

#include <asm/fpswa.h>
#include <asm/ia32.h>
#include <asm/intrinsics.h>
#include <asm/processor.h>
#include <asm/uaccess.h>

fpswa_interface_t *fpswa_interface;
EXPORT_SYMBOL(fpswa_interface);

void __init
trap_init (void)
{
	if (ia64_boot_param->fpswa)
		/* FPSWA fixup: make the interface pointer a kernel virtual address: */
		fpswa_interface = __va(ia64_boot_param->fpswa);
}

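/*
 * die() serializes fatal-error output with a private spinlock and records the
 * owning CPU, so a recursive die() on the same CPU does not deadlock on the
 * lock it already holds; after three nested entries the register dump is
 * suppressed.
 */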
void
die (const char *str, struct pt_regs *regs, long err)
{
	static struct {
		spinlock_t lock;
		u32 lock_owner;
		int lock_owner_depth;
	} die = {
		.lock = __SPIN_LOCK_UNLOCKED(die.lock),
		.lock_owner = -1,
		.lock_owner_depth = 0
	};
	static int die_counter;
	int cpu = get_cpu();

	if (die.lock_owner != cpu) {
		console_verbose();
		spin_lock_irq(&die.lock);
		die.lock_owner = cpu;
		die.lock_owner_depth = 0;
		bust_spinlocks(1);
	}
	put_cpu();

	if (++die.lock_owner_depth < 3) {
		printk("%s[%d]: %s %ld [%d]\n",
		       current->comm, task_pid_nr(current), str, err, ++die_counter);
		(void) notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
		show_regs(regs);
	} else
		printk(KERN_ERR "Recursive die() failure, output suppressed\n");

	bust_spinlocks(0);
	die.lock_owner = -1;
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die.lock);

	if (panic_on_oops)
		panic("Fatal exception");

	do_exit(SIGSEGV);
}

void
die_if_kernel (char *str, struct pt_regs *regs, long err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}

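/*
 * Handle a fault raised by the "break" instruction.  The break immediate
 * (break_num) selects which signal and si_code get delivered; break 0 and
 * immediates in the debugger range are first offered to the die notifier
 * chain (e.g. kprobes) before a signal is sent.
 */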
void
__kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
{
	siginfo_t siginfo;
	int sig, code;

	/* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these fields initialized: */
	siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
	siginfo.si_imm = break_num;
	siginfo.si_flags = 0;		/* clear __ISR_VALID */
	siginfo.si_isr = 0;

	switch (break_num) {
	case 0: /* unknown error (used by GCC for __builtin_abort()) */
		if (notify_die(DIE_BREAK, "break 0", regs, break_num, TRAP_BRKPT, SIGTRAP)
		    == NOTIFY_STOP)
			return;
		die_if_kernel("bugcheck!", regs, break_num);
		sig = SIGILL; code = ILL_ILLOPC;
		break;

	case 1: /* integer divide by zero */
		sig = SIGFPE; code = FPE_INTDIV;
		break;

	case 2: /* integer overflow */
		sig = SIGFPE; code = FPE_INTOVF;
		break;

	case 3: /* range check/bounds check */
		sig = SIGFPE; code = FPE_FLTSUB;
		break;

	case 4: /* null pointer dereference */
		sig = SIGSEGV; code = SEGV_MAPERR;
		break;

	case 5: /* misaligned data */
		sig = SIGSEGV; code = BUS_ADRALN;
		break;

	case 6: /* decimal overflow */
		sig = SIGFPE; code = __FPE_DECOVF;
		break;

	case 7: /* decimal divide by zero */
		sig = SIGFPE; code = __FPE_DECDIV;
		break;

	case 8: /* packed decimal error */
		sig = SIGFPE; code = __FPE_DECERR;
		break;

	case 9: /* invalid ASCII digit */
		sig = SIGFPE; code = __FPE_INVASC;
		break;

	case 10: /* invalid decimal digit */
		sig = SIGFPE; code = __FPE_INVDEC;
		break;

	case 11: /* paragraph stack overflow */
		sig = SIGSEGV; code = __SEGV_PSTKOVF;
		break;

	case 0x3f000 ... 0x3ffff: /* bundle-update in progress */
		sig = SIGILL; code = __ILL_BNDMOD;
		break;

	default:
		if (break_num < 0x40000 || break_num > 0x100000)
			die_if_kernel("Bad break", regs, break_num);

		if (break_num < 0x80000) {
			sig = SIGILL; code = __ILL_BREAK;
		} else {
			if (notify_die(DIE_BREAK, "bad break", regs, break_num, TRAP_BRKPT, SIGTRAP)
			    == NOTIFY_STOP)
				return;
			sig = SIGTRAP; code = TRAP_BRKPT;
		}
	}
	siginfo.si_signo = sig;
	siginfo.si_errno = 0;
	siginfo.si_code = code;
	force_sig_info(sig, &siginfo, current);
}

/*
 * disabled_fph_fault() is called when a user-level process attempts to access f32..f127
 * and it doesn't own the fp-high register partition.  When this happens, we save the
 * current fph partition in the task_struct of the fpu-owner (if necessary) and then load
 * the fp-high partition of the current task (if necessary).  Note that the kernel has
 * access to fph by the time we get here, as the IVT's "Disabled FP-Register" handler takes
 * care of clearing psr.dfh.
 */
static inline void
disabled_fph_fault (struct pt_regs *regs)
{
	struct ia64_psr *psr = ia64_psr(regs);

	/* first, grant user-level access to fph partition: */
	psr->dfh = 0;

	/*
	 * Make sure that no other task gets in on this processor
	 * while we're claiming the FPU
	 */
	preempt_disable();
#ifndef CONFIG_SMP
	{
		struct task_struct *fpu_owner
			= (struct task_struct *)ia64_get_kr(IA64_KR_FPU_OWNER);

		if (ia64_is_local_fpu_owner(current)) {
			preempt_enable_no_resched();
			return;
		}

		if (fpu_owner)
			ia64_flush_fph(fpu_owner);
	}
#endif /* !CONFIG_SMP */
	ia64_set_local_fpu_owner(current);
	if ((current->thread.flags & IA64_THREAD_FPH_VALID) != 0) {
		__ia64_load_fpu(current->thread.fph);
		psr->mfh = 0;
	} else {
		__ia64_init_fpu();
		/*
		 * Set mfh because the state in thread.fph does not match the state in
		 * the fph partition.
		 */
		psr->mfh = 1;
	}
	preempt_enable_no_resched();
}

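/*
 * Thin wrapper around the EFI FPSWA (floating-point software assist)
 * firmware: marshal the in-kernel FP state (only f6-f11, which live in
 * pt_regs) and the relevant control registers, invoke the fpswa entry point,
 * and hand back its status.  Returns -1 if no FPSWA driver was found at boot.
 */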
static inline int
fp_emulate (int fp_fault, void *bundle, long *ipsr, long *fpsr, long *isr, long *pr, long *ifs,
	    struct pt_regs *regs)
{
	fp_state_t fp_state;
	fpswa_ret_t ret;

	if (!fpswa_interface)
		return -1;

	memset(&fp_state, 0, sizeof(fp_state_t));

	/*
	 * compute fp_state.  only FP registers f6 - f11 are used by the
	 * kernel, so set those bits in the mask and set the low volatile
	 * pointer to point to these registers.
	 */
	fp_state.bitmask_low64 = 0xfc0;  /* bit6..bit11 */

	fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
	/*
	 * unsigned long (*EFI_FPSWA) (
	 *      unsigned long    trap_type,
	 *      void             *Bundle,
	 *      unsigned long    *pipsr,
	 *      unsigned long    *pfsr,
	 *      unsigned long    *pisr,
	 *      unsigned long    *ppreds,
	 *      unsigned long    *pifs,
	 *      void             *fp_state);
	 */
	ret = (*fpswa_interface->fpswa)((unsigned long) fp_fault, bundle,
					(unsigned long *) ipsr, (unsigned long *) fpsr,
					(unsigned long *) isr, (unsigned long *) pr,
					(unsigned long *) ifs, &fp_state);

	return ret.status;
}

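/*
 * State for rate-limiting the "floating-point assist fault" console message:
 * a per-CPU counter/deadline pair plus a cache-line-aligned global copy, so
 * that neither a single CPU nor all CPUs together can flood the log.
 */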
struct fpu_swa_msg {
	unsigned long count;
	unsigned long time;
};
static DEFINE_PER_CPU(struct fpu_swa_msg, cpulast);
DECLARE_PER_CPU(struct fpu_swa_msg, cpulast);
static struct fpu_swa_msg last __cacheline_aligned;


/*
 * Handle floating-point assist faults and traps.
 */
static int
handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
{
	long exception, bundle[2];
	unsigned long fault_ip;
	struct siginfo siginfo;

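	/*
	 * For an fp trap (as opposed to a fault) cr_iip has already been
	 * advanced past the excepting bundle; if the restart instruction
	 * number is 0, back up by one 16-byte bundle so we fetch the
	 * instruction that actually trapped.
	 */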
	fault_ip = regs->cr_iip;
	if (!fp_fault && (ia64_psr(regs)->ri == 0))
		fault_ip -= 16;
	if (copy_from_user(bundle, (void __user *) fault_ip, sizeof(bundle)))
		return -1;

	if (!(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT)) {
		unsigned long count, current_jiffies = jiffies;
		struct fpu_swa_msg *cp = &__get_cpu_var(cpulast);

		if (unlikely(current_jiffies > cp->time))
			cp->count = 0;
		if (unlikely(cp->count < 5)) {
			cp->count++;
			cp->time = current_jiffies + 5 * HZ;

			/* minimize races by grabbing a copy of count BEFORE checking last.time. */
			count = last.count;
			barrier();

			/*
			 * Lower 4 bits are used as a count.  Upper bits are a sequence
			 * number that is updated when count is reset.  The cmpxchg will
			 * fail if seqno has changed.  This minimizes multiple cpus
			 * resetting the count.
			 */
			if (current_jiffies > last.time)
				(void) cmpxchg_acq(&last.count, count, 16 + (count & ~15));

			/* used fetchadd to atomically update the count */
			if ((last.count & 15) < 5 && (ia64_fetchadd(1, &last.count, acq) & 15) < 5) {
				last.time = current_jiffies + 5 * HZ;
				printk(KERN_WARNING
				       "%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n",
				       current->comm, task_pid_nr(current),
				       regs->cr_iip + ia64_psr(regs)->ri, isr);
			}
		}
	}

	exception = fp_emulate(fp_fault, bundle, &regs->cr_ipsr, &regs->ar_fpsr, &isr, &regs->pr,
			       &regs->cr_ifs, regs);
	if (fp_fault) {
		if (exception == 0) {
			/* emulation was successful */
			ia64_increment_ip(regs);
		} else if (exception == -1) {
			printk(KERN_ERR "handle_fpu_swa: fp_emulate() returned -1\n");
			return -1;
		} else {
			/* is next instruction a trap? */
			if (exception & 2) {
				ia64_increment_ip(regs);
			}
			siginfo.si_signo = SIGFPE;
			siginfo.si_errno = 0;
			siginfo.si_code = __SI_FAULT;	/* default code */
			siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
			if (isr & 0x11) {
				siginfo.si_code = FPE_FLTINV;
			} else if (isr & 0x22) {
				/* denormal operand gets the same si_code as underflow
				 * see arch/i386/kernel/traps.c:math_error() */
				siginfo.si_code = FPE_FLTUND;
			} else if (isr & 0x44) {
				siginfo.si_code = FPE_FLTDIV;
			}
			siginfo.si_isr = isr;
			siginfo.si_flags = __ISR_VALID;
			siginfo.si_imm = 0;
			force_sig_info(SIGFPE, &siginfo, current);
		}
	} else {
		if (exception == -1) {
			printk(KERN_ERR "handle_fpu_swa: fp_emulate() returned -1\n");
			return -1;
		} else if (exception != 0) {
			/* raise exception */
			siginfo.si_signo = SIGFPE;
			siginfo.si_errno = 0;
			siginfo.si_code = __SI_FAULT;	/* default code */
			siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
			if (isr & 0x880) {
				siginfo.si_code = FPE_FLTOVF;
			} else if (isr & 0x1100) {
				siginfo.si_code = FPE_FLTUND;
			} else if (isr & 0x2200) {
				siginfo.si_code = FPE_FLTRES;
			}
			siginfo.si_isr = isr;
			siginfo.si_flags = __ISR_VALID;
			siginfo.si_imm = 0;
			force_sig_info(SIGFPE, &siginfo, current);
		}
	}
	return 0;
}

struct illegal_op_return {
	unsigned long fkt, arg1, arg2, arg3;
};

struct illegal_op_return
ia64_illegal_op_fault (unsigned long ec, long arg1, long arg2, long arg3,
		       long arg4, long arg5, long arg6, long arg7,
		       struct pt_regs regs)
{
	struct illegal_op_return rv;
	struct siginfo si;
	char buf[128];

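	/*
	 * Some IA-64 implementations do not provide the brl (long branch)
	 * instruction; with CONFIG_IA64_BRL_EMU enabled, try emulating it
	 * before treating this as a genuine illegal-operation fault.
	 */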
#ifdef CONFIG_IA64_BRL_EMU
	{
		extern struct illegal_op_return ia64_emulate_brl (struct pt_regs *, unsigned long);

		rv = ia64_emulate_brl(&regs, ec);
		if (rv.fkt != (unsigned long) -1)
			return rv;
	}
#endif

	sprintf(buf, "IA-64 Illegal operation fault");
	die_if_kernel(buf, &regs, 0);

	memset(&si, 0, sizeof(si));
	si.si_signo = SIGILL;
	si.si_code = ILL_ILLOPC;
	si.si_addr = (void __user *) (regs.cr_iip + ia64_psr(&regs)->ri);
	force_sig_info(SIGILL, &si, current);
	rv.fkt = 0;
	return rv;
}

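/*
 * Common C-level entry point for the miscellaneous faults and traps that the
 * IVT funnels into a single handler.  'vector' is the interruption vector
 * number; isr/ifa/iim/itir are the corresponding control registers captured
 * at interruption time.
 */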
void __kprobes
ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
	    unsigned long iim, unsigned long itir, long arg5, long arg6,
	    long arg7, struct pt_regs regs)
{
	unsigned long code, error = isr, iip;
	struct siginfo siginfo;
	char buf[128];
	int result, sig;
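	/* Fault names indexed by the General Exception code field of the ISR. */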
	static const char *reason[] = {
		"IA-64 Illegal Operation fault",
		"IA-64 Privileged Operation fault",
		"IA-64 Privileged Register fault",
		"IA-64 Reserved Register/Field fault",
		"Disabled Instruction Set Transition fault",
		"Unknown fault 5", "Unknown fault 6", "Unknown fault 7", "Illegal Hazard fault",
		"Unknown fault 9", "Unknown fault 10", "Unknown fault 11", "Unknown fault 12",
		"Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
	};

	if ((isr & IA64_ISR_NA) && ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
		/*
		 * This fault was due to lfetch.fault, set "ed" bit in the psr to cancel
		 * the lfetch.
		 */
		ia64_psr(&regs)->ed = 1;
		return;
	}

	iip = regs.cr_iip + ia64_psr(&regs)->ri;

	switch (vector) {
	case 24: /* General Exception */
		code = (isr >> 4) & 0xf;
		sprintf(buf, "General Exception: %s%s", reason[code],
			(code == 3) ? ((isr & (1UL << 37))
				       ? " (RSE access)" : " (data access)") : "");
		if (code == 8) {
# ifdef CONFIG_IA64_PRINT_HAZARDS
			printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
			       current->comm, task_pid_nr(current),
			       regs.cr_iip + ia64_psr(&regs)->ri, regs.pr);
# endif
			return;
		}
		break;

	case 25: /* Disabled FP-Register */
		if (isr & 2) {
			disabled_fph_fault(&regs);
			return;
		}
		sprintf(buf, "Disabled FPL fault---not supposed to happen!");
		break;

	case 26: /* NaT Consumption */
		if (user_mode(&regs)) {
			void __user *addr;

			if (((isr >> 4) & 0xf) == 2) {
				/* NaT page consumption */
				sig = SIGSEGV;
				code = SEGV_ACCERR;
				addr = (void __user *) ifa;
			} else {
				/* register NaT consumption */
				sig = SIGILL;
				code = ILL_ILLOPN;
				addr = (void __user *) (regs.cr_iip
							+ ia64_psr(&regs)->ri);
			}
			siginfo.si_signo = sig;
			siginfo.si_code = code;
			siginfo.si_errno = 0;
			siginfo.si_addr = addr;
			siginfo.si_imm = vector;
			siginfo.si_flags = __ISR_VALID;
			siginfo.si_isr = isr;
			force_sig_info(sig, &siginfo, current);
			return;
		} else if (ia64_done_with_exception(&regs))
			return;
		sprintf(buf, "NaT consumption");
		break;

	case 31: /* Unsupported Data Reference */
		if (user_mode(&regs)) {
			siginfo.si_signo = SIGILL;
			siginfo.si_code = ILL_ILLOPN;
			siginfo.si_errno = 0;
			siginfo.si_addr = (void __user *) iip;
			siginfo.si_imm = vector;
			siginfo.si_flags = __ISR_VALID;
			siginfo.si_isr = isr;
			force_sig_info(SIGILL, &siginfo, current);
			return;
		}
		sprintf(buf, "Unsupported data reference");
		break;

	case 29: /* Debug */
	case 35: /* Taken Branch Trap */
	case 36: /* Single Step Trap */
		if (fsys_mode(current, &regs)) {
			extern char __kernel_syscall_via_break[];
			/*
			 * Got a trap in fsys-mode: Taken Branch Trap
			 * and Single Step trap need special handling;
			 * Debug trap is ignored (we disable it here
			 * and re-enable it in the lower-privilege trap).
			 */
			if (unlikely(vector == 29)) {
				set_thread_flag(TIF_DB_DISABLED);
				ia64_psr(&regs)->db = 0;
				ia64_psr(&regs)->lp = 1;
				return;
			}
			/* re-do the system call via break 0x100000: */
			regs.cr_iip = (unsigned long) __kernel_syscall_via_break;
			ia64_psr(&regs)->ri = 0;
			ia64_psr(&regs)->cpl = 3;
			return;
		}
		switch (vector) {
		case 29:
			siginfo.si_code = TRAP_HWBKPT;
#ifdef CONFIG_ITANIUM
			/*
			 * Erratum 10 (IFA may contain incorrect address) now has
			 * "NoFix" status.  There are no plans for fixing this.
			 */
			if (ia64_psr(&regs)->is == 0)
				ifa = regs.cr_iip;
#endif
			break;
		case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break;
		case 36: siginfo.si_code = TRAP_TRACE; ifa = 0; break;
		}
		if (notify_die(DIE_FAULT, "ia64_fault", &regs, vector, siginfo.si_code, SIGTRAP)
		    == NOTIFY_STOP)
			return;
		siginfo.si_signo = SIGTRAP;
		siginfo.si_errno = 0;
		siginfo.si_addr = (void __user *) ifa;
		siginfo.si_imm = 0;
		siginfo.si_flags = __ISR_VALID;
		siginfo.si_isr = isr;
		force_sig_info(SIGTRAP, &siginfo, current);
		return;

	case 32: /* fp fault */
	case 33: /* fp trap */
		result = handle_fpu_swa((vector == 32) ? 1 : 0, &regs, isr);
		if ((result < 0) || (current->thread.flags & IA64_THREAD_FPEMU_SIGFPE)) {
			siginfo.si_signo = SIGFPE;
			siginfo.si_errno = 0;
			siginfo.si_code = FPE_FLTINV;
			siginfo.si_addr = (void __user *) iip;
			siginfo.si_flags = __ISR_VALID;
			siginfo.si_isr = isr;
			siginfo.si_imm = 0;
			force_sig_info(SIGFPE, &siginfo, current);
		}
		return;

	case 34:
		if (isr & 0x2) {
			/* Lower-Privilege Transfer Trap */

			/* If we disabled debug traps during an fsyscall,
			 * re-enable them here.
			 */
			if (test_thread_flag(TIF_DB_DISABLED)) {
				clear_thread_flag(TIF_DB_DISABLED);
				ia64_psr(&regs)->db = 1;
			}

			/*
			 * Just clear PSR.lp and then return immediately:
			 * all the interesting work (e.g., signal delivery)
			 * is done in the kernel exit path.
			 */
			ia64_psr(&regs)->lp = 0;
			return;
		} else {
			/* Unimplemented Instr. Address Trap */
			if (user_mode(&regs)) {
				siginfo.si_signo = SIGILL;
				siginfo.si_code = ILL_BADIADDR;
				siginfo.si_errno = 0;
				siginfo.si_flags = 0;
				siginfo.si_isr = 0;
				siginfo.si_imm = 0;
				siginfo.si_addr = (void __user *) iip;
				force_sig_info(SIGILL, &siginfo, current);
				return;
			}
			sprintf(buf, "Unimplemented Instruction Address fault");
		}
		break;

	case 45:
#ifdef CONFIG_IA32_SUPPORT
		if (ia32_exception(&regs, isr) == 0)
			return;
#endif
		printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n");
		printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n",
		       iip, ifa, isr);
		force_sig(SIGSEGV, current);
		break;

	case 46:
#ifdef CONFIG_IA32_SUPPORT
		if (ia32_intercept(&regs, isr) == 0)
			return;
#endif
		printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n");
		printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n",
		       iip, ifa, isr, iim);
		force_sig(SIGSEGV, current);
		return;

	case 47:
		sprintf(buf, "IA-32 Interruption Fault (int 0x%lx)", isr >> 16);
		break;

	default:
		sprintf(buf, "Fault %lu", vector);
		break;
	}
	die_if_kernel(buf, &regs, error);
	force_sig(SIGILL, current);
}