/*
 * arch/s390/kernel/traps.c
 *
 * S390 version
 *   Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *   Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *              Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
 * Derived from "arch/i386/kernel/traps.c"
 *   Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/tracehook.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>
#include <linux/kprobes.h>
#include <linux/bug.h>
#include <linux/utsname.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/mathemu.h>
#include <asm/cpcmd.h>
#include <asm/s390_ext.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
#include "entry.h"

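/*
 * pgm_check_table is indexed by the program interruption code that the
 * hardware stores on a program check; the low-level program check handler
 * (see "entry.h" and the entry code) dispatches through it to the handlers
 * installed by trap_init() below.
 */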
pgm_check_handler_t *pgm_check_table[128];

int show_unhandled_signals;

extern pgm_check_handler_t do_protection_exception;
extern pgm_check_handler_t do_dat_exception;
extern pgm_check_handler_t do_asce_exception;

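/*
 * Read the current stack pointer: on s390 the stack pointer lives in
 * general register 15, and "la %0,0(15)" simply copies it into a C variable.
 */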
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })

#ifndef CONFIG_64BIT
#define LONG "%08lx "
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define LONG "%016lx "
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */

/*
 * For show_trace we have three different stacks to consider:
 *   - the panic stack which is used if the kernel stack has overflowed
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
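/*
 * Each stack frame begins with a back-chain pointer to the caller's frame.
 * In struct stack_frame the gprs[] array holds the saved registers r6-r15,
 * so sf->gprs[8] is the saved r14, i.e. the return address that is printed
 * for each frame below.
 */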
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		sp = sp & PSW_ADDR_INSN;
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
		print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
			print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
		print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
		low = sp;
		sp = regs->gprs[15];
	}
}

static void show_trace(struct task_struct *task, unsigned long *stack)
{
	register unsigned long __r15 asm ("15");
	unsigned long sp;

	sp = (unsigned long) stack;
	if (!sp)
		sp = task ? task->thread.ksp : __r15;
	printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
	sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
			  S390_lowcore.panic_stack);
#endif
	sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
			  S390_lowcore.async_stack);
	if (task)
		__show_trace(sp, (unsigned long) task_stack_page(task),
			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
	else
		__show_trace(sp, S390_lowcore.thread_info,
			     S390_lowcore.thread_info + THREAD_SIZE);
	if (!task)
		task = current;
	debug_show_held_locks(task);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	register unsigned long * __r15 asm ("15");
	unsigned long *stack;
	int i;

	if (!sp)
		stack = task ? (unsigned long *) task->thread.ksp : __r15;
	else
		stack = sp;

	for (i = 0; i < kstack_depth_to_print; i++) {
		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
			break;
		if (i && ((i * sizeof (long) % 32) == 0))
			printk("\n ");
		printk(LONG, *stack++);
	}
	printk("\n");
	show_trace(task, sp);
}

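/*
 * The breaking-event-address register records where the last branch (or
 * other break in sequential execution) came from; on 64 bit it is saved on
 * interruption and ends up in regs->args[0], which often points at the code
 * that jumped to the faulting location.
 */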
static void show_last_breaking_event(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
	printk("Last Breaking-Event-Address:\n");
	printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
	print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
#endif
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	printk("CPU: %d %s %s %.*s\n",
	       task_thread_info(current)->cpu, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);
	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
	       current->comm, current->pid, current,
	       (void *) current->thread.ksp);
	show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);

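/*
 * Extract a field from the PSW mask: "(~bits + 1) & bits" isolates the
 * lowest set bit of the mask, so the division shifts the selected field
 * down to start at bit 0.
 */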
static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
{
	return (regs->psw.mask & bits) / ((~bits + 1) & bits);
}

void show_registers(struct pt_regs *regs)
{
	char *mode;

	mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
	printk("%s PSW : %p %p",
	       mode, (void *) regs->psw.mask,
	       (void *) regs->psw.addr);
	print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
	printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
	       mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
	       mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
	       mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
	       mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
	       mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
	printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS));
#endif
	printk("\n%s GPRS: " FOURLONG, mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk(" " FOURLONG,
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk(" " FOURLONG,
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk(" " FOURLONG,
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);

	show_code(regs);
}

void show_regs(struct pt_regs *regs)
{
	print_modules();
	printk("CPU: %d %s %s %.*s\n",
	       task_thread_info(current)->cpu, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);
	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
	       current->comm, current->pid, current,
	       (void *) current->thread.ksp);
	show_registers(regs);
	/* Show stack backtrace if pt_regs is from kernel mode */
	if (!(regs->psw.mask & PSW_MASK_PSTATE))
		show_trace(NULL, (unsigned long *) regs->gprs[15]);
	show_last_breaking_event(regs);
}

static DEFINE_SPINLOCK(die_lock);

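/*
 * Central oops routine: serialize concurrent oopses on die_lock, print the
 * banner and register state, notify the die notifier chain and then either
 * panic (when in interrupt context or when panic_on_oops is set) or kill
 * the current task with SIGSEGV.
 */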
void die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;

	oops_enter();
	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	do_exit(SIGSEGV);
}

static inline void report_user_fault(struct pt_regs *regs, long int_code,
				     int signr)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk("User process fault: interruption code 0x%lX ", int_code);
	print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
	printk("\n");
	show_regs(regs);
}

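/*
 * Required by the generic BUG() machinery: any address is accepted as a
 * potential bug location, the bug table lookup in report_bug() does the
 * real filtering.
 */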
int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}

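/*
 * Common trap delivery: give the die notifier chain a chance first. For
 * faults in user space the signal is delivered to the task and optionally
 * logged via report_user_fault(); for faults in kernel space try an
 * exception table fixup, then report_bug() for WARN()s, and finally die().
 */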
static inline void __kprobes do_trap(long pgm_int_code, int signr, char *str,
				     struct pt_regs *regs, siginfo_t *info)
{
	if (notify_die(DIE_TRAP, str, regs, pgm_int_code,
		       pgm_int_code, signr) == NOTIFY_STOP)
		return;

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		struct task_struct *tsk = current;

		tsk->thread.trap_no = pgm_int_code & 0xffff;
		force_sig_info(signr, info, tsk);
		report_user_fault(regs, pgm_int_code, signr);
	} else {
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (fixup)
			regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
		else {
			enum bug_trap_type btt;

			btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
			if (btt == BUG_TRAP_TYPE_WARN)
				return;
			die(str, regs, pgm_int_code);
		}
	}
}

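/*
 * The program check old PSW points past the instruction that caused the
 * exception; the upper half of pgm_int_code carries the instruction length,
 * so subtracting it yields the address of the faulting instruction itself.
 */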
static inline void __user *get_psw_address(struct pt_regs *regs,
					   long pgm_int_code)
{
	return (void __user *)
		((regs->psw.addr - (pgm_int_code >> 16)) & PSW_ADDR_INSN);
}

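/*
 * PER (program event recording) trap, used for hardware single stepping:
 * let registered debuggers have a look via the die chain, otherwise deliver
 * SIGTRAP to the traced task.
 */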
void __kprobes do_per_trap(struct pt_regs *regs)
{
	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
		return;
	if (tracehook_consider_fatal_signal(current, SIGTRAP))
		force_sig(SIGTRAP, current);
}

static void default_trap_handler(struct pt_regs *regs, long pgm_int_code,
				 unsigned long trans_exc_code)
{
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		report_user_fault(regs, pgm_int_code, SIGSEGV);
		do_exit(SIGSEGV);
	} else
		die("Unknown program exception", regs, pgm_int_code);
}

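/*
 * DO_ERROR_INFO() generates the simple handlers that only need to fill in
 * a siginfo with a fixed signal number and si_code and hand everything
 * else over to do_trap().
 */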
#define DO_ERROR_INFO(name, signr, sicode, str) \
static void name(struct pt_regs *regs, long pgm_int_code, \
		 unsigned long trans_exc_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = get_psw_address(regs, pgm_int_code); \
	do_trap(pgm_int_code, signr, str, regs, &info); \
}

DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
	      "addressing exception")
DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN,
	      "execute exception")
DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV,
	      "fixpoint divide exception")
DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF,
	      "fixpoint overflow exception")
DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF,
	      "HFP overflow exception")
DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND,
	      "HFP underflow exception")
DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES,
	      "HFP significance exception")
DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV,
	      "HFP divide exception")
DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV,
	      "HFP square root exception")
DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN,
	      "operand exception")
DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
	      "privileged operation")
DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
	      "special operation exception")
DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
	      "translation exception")

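/*
 * Translate the data exception code (DXC) stored in byte 2 of the floating
 * point control register into the matching FPE_* si_code and deliver the
 * resulting SIGFPE through do_trap().
 */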
static inline void do_fp_trap(struct pt_regs *regs, void __user *location,
			      int fpc, long pgm_int_code)
{
	siginfo_t si;

	si.si_signo = SIGFPE;
	si.si_errno = 0;
	si.si_addr = location;
	si.si_code = 0;
	/* FPC[2] is Data Exception Code */
	if ((fpc & 0x00000300) == 0) {
		/* bits 6 and 7 of DXC are 0 iff IEEE exception */
		if (fpc & 0x8000) /* invalid fp operation */
			si.si_code = FPE_FLTINV;
		else if (fpc & 0x4000) /* div by 0 */
			si.si_code = FPE_FLTDIV;
		else if (fpc & 0x2000) /* overflow */
			si.si_code = FPE_FLTOVF;
		else if (fpc & 0x1000) /* underflow */
			si.si_code = FPE_FLTUND;
		else if (fpc & 0x0800) /* inexact */
			si.si_code = FPE_FLTRES;
	}
	do_trap(pgm_int_code, SIGFPE,
		"floating point exception", regs, &si);
}

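/*
 * Operation exception (illegal opcode). In user space this may be the
 * ptrace breakpoint opcode (S390_BREAKPOINT_U16) inserted by a debugger, or
 * with CONFIG_MATHEMU a floating point instruction that has to be emulated;
 * anything else raises SIGILL. In kernel space the opcode is offered to the
 * kprobes breakpoint notifier before giving up with SIGILL.
 */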
static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code,
				 unsigned long trans_exc_code)
{
	siginfo_t info;
	__u8 opcode[6];
	__u16 __user *location;
	int signal = 0;

	location = get_psw_address(regs, pgm_int_code);

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
			return;
		if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
			if (tracehook_consider_fatal_signal(current, SIGTRAP))
				force_sig(SIGTRAP, current);
			else
				signal = SIGILL;
#ifdef CONFIG_MATHEMU
		} else if (opcode[0] == 0xb3) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_b3(opcode, regs);
		} else if (opcode[0] == 0xed) {
			if (get_user(*((__u32 *) (opcode+2)),
				     (__u32 __user *)(location+1)))
				return;
			signal = math_emu_ed(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb299) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_srnm(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29c) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_stfpc(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29d) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_lfpc(opcode, regs);
#endif
		} else
			signal = SIGILL;
	} else {
		/*
		 * If we get an illegal op in kernel mode, send it through the
		 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
		 */
		if (notify_die(DIE_BPT, "bpt", regs, pgm_int_code,
			       3, SIGTRAP) != NOTIFY_STOP)
			signal = SIGILL;
	}

#ifdef CONFIG_MATHEMU
	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, pgm_int_code);
	else if (signal == SIGSEGV) {
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = SEGV_MAPERR;
		info.si_addr = (void __user *) location;
		do_trap(pgm_int_code, signal,
			"user address fault", regs, &info);
	} else
#endif
	if (signal) {
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPC;
		info.si_addr = (void __user *) location;
		do_trap(pgm_int_code, signal,
			"illegal operation", regs, &info);
	}
}


#ifdef CONFIG_MATHEMU
asmlinkage void specification_exception(struct pt_regs *regs,
					long pgm_int_code,
					unsigned long trans_exc_code)
{
	__u8 opcode[6];
	__u16 __user *location = NULL;
	int signal = 0;

	location = (__u16 __user *) get_psw_address(regs, pgm_int_code);

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		default:
			signal = SIGILL;
			break;
		}
	} else
		signal = SIGILL;

	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, pgm_int_code);
	else if (signal) {
		siginfo_t info;
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPN;
		info.si_addr = location;
		do_trap(pgm_int_code, signal,
			"specification exception", regs, &info);
	}
}
#else
DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
	      "specification exception");
#endif

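/*
 * Data exception: on machines with IEEE floating point the data exception
 * code is read from the floating point control register with "stfpc"; a
 * pending IEEE exception is reported as SIGFPE via do_fp_trap(), anything
 * else as SIGILL. With CONFIG_MATHEMU the trapping floating point
 * instruction is fed through the math emulator first.
 */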
static void data_exception(struct pt_regs *regs, long pgm_int_code,
			   unsigned long trans_exc_code)
{
	__u16 __user *location;
	int signal = 0;

	location = get_psw_address(regs, pgm_int_code);

	if (MACHINE_HAS_IEEE)
		asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));

#ifdef CONFIG_MATHEMU
	else if (regs->psw.mask & PSW_MASK_PSTATE) {
		__u8 opcode[6];
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		case 0xb3:
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_b3(opcode, regs);
			break;
		case 0xed:
			get_user(*((__u32 *) (opcode+2)),
				 (__u32 __user *)(location+1));
			signal = math_emu_ed(opcode, regs);
			break;
		case 0xb2:
			if (opcode[1] == 0x99) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_srnm(opcode, regs);
			} else if (opcode[1] == 0x9c) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_stfpc(opcode, regs);
			} else if (opcode[1] == 0x9d) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_lfpc(opcode, regs);
			} else
				signal = SIGILL;
			break;
		default:
			signal = SIGILL;
			break;
		}
	}
#endif
	if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
		signal = SIGFPE;
	else
		signal = SIGILL;
	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, pgm_int_code);
	else if (signal) {
		siginfo_t info;
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPN;
		info.si_addr = location;
		do_trap(pgm_int_code, signal, "data exception", regs, &info);
	}
}

static void space_switch_exception(struct pt_regs *regs, long pgm_int_code,
				   unsigned long trans_exc_code)
{
	siginfo_t info;

	/* Set user psw back to home space mode. */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		regs->psw.mask |= PSW_ASC_HOME;
	/* Send SIGILL. */
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_PRVOPC;
	info.si_addr = get_psw_address(regs, pgm_int_code);
	do_trap(pgm_int_code, SIGILL, "space switch event", regs, &info);
}

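/*
 * Called from the entry code when a kernel stack overflow is detected
 * (CONFIG_CHECK_STACK); there is nothing left to recover, so dump the
 * registers and panic.
 */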
asmlinkage void __kprobes kernel_stack_overflow(struct pt_regs * regs)
{
	bust_spinlocks(1);
	printk("Kernel stack overflow.\n");
	show_regs(regs);
	bust_spinlocks(0);
	panic("Corrupt kernel stack, can't continue.");
}

/* init is done in lowcore.S and head.S */

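/*
 * Populate pgm_check_table: the array index is the program interruption
 * code (0x01 illegal op, 0x04 protection, 0x10/0x11 DAT faults, ...), with
 * default_trap_handler() covering every code that has no dedicated handler.
 */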
void __init trap_init(void)
{
	int i;

	for (i = 0; i < 128; i++)
		pgm_check_table[i] = &default_trap_handler;
	pgm_check_table[1] = &illegal_op;
	pgm_check_table[2] = &privileged_op;
	pgm_check_table[3] = &execute_exception;
	pgm_check_table[4] = &do_protection_exception;
	pgm_check_table[5] = &addressing_exception;
	pgm_check_table[6] = &specification_exception;
	pgm_check_table[7] = &data_exception;
	pgm_check_table[8] = &overflow_exception;
	pgm_check_table[9] = &divide_exception;
	pgm_check_table[0x0A] = &overflow_exception;
	pgm_check_table[0x0B] = &divide_exception;
	pgm_check_table[0x0C] = &hfp_overflow_exception;
	pgm_check_table[0x0D] = &hfp_underflow_exception;
	pgm_check_table[0x0E] = &hfp_significance_exception;
	pgm_check_table[0x0F] = &hfp_divide_exception;
	pgm_check_table[0x10] = &do_dat_exception;
	pgm_check_table[0x11] = &do_dat_exception;
	pgm_check_table[0x12] = &translation_exception;
	pgm_check_table[0x13] = &special_op_exception;
#ifdef CONFIG_64BIT
	pgm_check_table[0x38] = &do_asce_exception;
	pgm_check_table[0x39] = &do_dat_exception;
	pgm_check_table[0x3A] = &do_dat_exception;
	pgm_check_table[0x3B] = &do_dat_exception;
#endif /* CONFIG_64BIT */
	pgm_check_table[0x15] = &operand_exception;
	pgm_check_table[0x1C] = &space_switch_exception;
	pgm_check_table[0x1D] = &hfp_sqrt_exception;
	/* Enable machine checks early. */
	local_mcck_enable();
}