parisc: ratelimit userspace segfault printing
arch/parisc/kernel/traps.c
/*
 * linux/arch/parisc/traps.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1999, 2000 Philipp Rumpf <prumpf@tux.org>
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/bug.h>
#include <linux/ratelimit.h>

#include <asm/assembly.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "../math-emu/math-emu.h"	/* for handle_fpe() */

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
DEFINE_SPINLOCK(pa_dbit_lock);
#endif

static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
	struct pt_regs *regs);

static int printbinary(char *buf, unsigned long x, int nbits)
{
	unsigned long mask = 1UL << (nbits - 1);
	while (mask != 0) {
		*buf++ = (mask & x ? '1' : '0');
		mask >>= 1;
	}
	*buf = '\0';

	return nbits;
}
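
/*
 * Example (derived from the loop above): printbinary(buf, 0xC5, 8)
 * fills buf with the NUL-terminated string "11000101" and returns 8.
 */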

#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FFMT "%016llx"	/* fpregs are 64-bit always */

#define PRINTREGS(lvl,r,f,fmt,x) \
	printk("%s%s%02d-%02d " fmt " " fmt " " fmt " " fmt "\n", \
		lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1], \
		(r)[(x)+2], (r)[(x)+3])

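/*
 * print_gr() and print_fr() below dump a register file four entries per
 * log line via PRINTREGS, e.g. "r00-03 <val> <val> <val> <val>".
 */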
static void print_gr(char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];

	printk("%s\n", level);
	printk("%s YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
	printbinary(buf, regs->gr[0], 32);
	printk("%sPSW: %s %s\n", level, buf, print_tainted());

	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->gr, "r", RFMT, i);
}

static void print_fr(char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];
	struct { u32 sw[2]; } s;

	/* FR are 64-bit everywhere. Need to use asm to get the content
	 * of fpsr/fper1, and we assume that we won't have an FP Identify
	 * in our way, otherwise we're screwed.
	 * The fldd is used to restore the T-bit if there was one, as the
	 * store clears it anyway.
	 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
	asm volatile ("fstd %%fr0,0(%1) \n\t"
		      "fldd 0(%1),%%fr0 \n\t"
		      : "=m" (s) : "r" (&s) : "r0");

	printk("%s\n", level);
	printk("%s VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
	printbinary(buf, s.sw[0], 32);
	printk("%sFPSR: %s\n", level, buf);
	printk("%sFPER1: %08x\n", level, s.sw[1]);

	/* here we'll print fr0 again, though it'll be meaningless */
	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->fr, "fr", FFMT, i);
}

void show_regs(struct pt_regs *regs)
{
	int i, user;
	char *level;
	unsigned long cr30, cr31;

	user = user_mode(regs);
	level = user ? KERN_DEBUG : KERN_CRIT;

	show_regs_print_info(level);

	print_gr(level, regs);

	for (i = 0; i < 8; i += 4)
		PRINTREGS(level, regs->sr, "sr", RFMT, i);

	if (user)
		print_fr(level, regs);

	cr30 = mfctl(30);
	cr31 = mfctl(31);
	printk("%s\n", level);
	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
		level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
	printk("%s IIR: %08lx ISR: " RFMT " IOR: " RFMT "\n",
		level, regs->iir, regs->isr, regs->ior);
	printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n",
		level, current_thread_info()->cpu, cr30, cr31);
	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);

	if (user) {
		printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
		printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
		printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
	} else {
		printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
		printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
		printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);

		parisc_show_stack(current, NULL, regs);
	}
}

static DEFINE_RATELIMIT_STATE(_hppa_rs,
	DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);

#define parisc_printk_ratelimited(critical, regs, fmt, ...)	{	\
	if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) {	\
		printk(fmt, ##__VA_ARGS__);	\
		show_regs(regs);	\
	}	\
}

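/*
 * Callers (die_if_kernel(), handle_break(), handle_interruption()) pass
 * critical=1 for messages that should always be eligible for printing,
 * and critical=0 for debug output that is additionally gated on the
 * show_unhandled_signals sysctl; in both cases the shared _hppa_rs
 * state rate-limits the resulting register dumps.
 */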

static void do_show_stack(struct unwind_frame_info *info)
{
	int i = 1;

	printk(KERN_CRIT "Backtrace:\n");
	while (i <= 16) {
		if (unwind_once(info) < 0 || info->ip == 0)
			break;

		if (__kernel_text_address(info->ip)) {
			printk(KERN_CRIT " [<" RFMT ">] %pS\n",
				info->ip, (void *) info->ip);
			i++;
		}
	}
	printk(KERN_CRIT "\n");
}

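/*
 * When called for the current task without a regs pointer, the function
 * below synthesizes a minimal pt_regs: the HERE label supplies an
 * instruction address, __builtin_return_address(0) fills in the return
 * pointer (r2), and the inline asm copies the live stack pointer out of
 * r30, which is enough for the unwinder to start walking the stack.
 */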
static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
	struct pt_regs *regs)
{
	struct unwind_frame_info info;
	struct task_struct *t;

	t = task ? task : current;
	if (regs) {
		unwind_frame_init(&info, t, regs);
		goto show_stack;
	}

	if (t == current) {
		unsigned long sp;

HERE:
		asm volatile ("copy %%r30, %0" : "=r"(sp));
		{
			struct pt_regs r;

			memset(&r, 0, sizeof(struct pt_regs));
			r.iaoq[0] = (unsigned long)&&HERE;
			r.gr[2] = (unsigned long)__builtin_return_address(0);
			r.gr[30] = sp;

			unwind_frame_init(&info, current, &r);
		}
	} else {
		unwind_frame_init_from_blocked_task(&info, t);
	}

show_stack:
	do_show_stack(&info);
}

void show_stack(struct task_struct *t, unsigned long *sp)
{
	return parisc_show_stack(t, sp, NULL);
}

int is_valid_bugaddr(unsigned long iaoq)
{
	return 1;
}

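/*
 * For user-mode faults, die_if_kernel() only emits a rate-limited
 * diagnostic and returns so the caller can deliver a signal; for
 * kernel-mode faults it oopses and does not return to the caller.
 */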
void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
	if (user_mode(regs)) {
		if (err == 0)
			return; /* STFU */

		parisc_printk_ratelimited(1, regs,
			KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
			current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);

		return;
	}

	oops_in_progress = 1;

	oops_enter();

	/* Amuse the user in a SPARC fashion */
	if (err) printk(KERN_CRIT
			" _______________________________ \n"
			" < Your System ate a SPARC! Gah! >\n"
			" ------------------------------- \n"
			" \\ ^__^\n"
			" (__)\\ )\\/\\\n"
			" U ||----w |\n"
			" || ||\n");

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* maybe the kernel hasn't booted very far yet and hasn't been able
	 * to initialize the serial or STI console. In that case we should
	 * re-enable the pdc console, so that the user will be able to
	 * identify the problem. */
	if (!console_drivers)
		pdc_console_restart();

	if (err)
		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
			current->comm, task_pid_nr(current), str, err);

	/* Wot's wrong wif bein' racy? */
	if (current->thread.flags & PARISC_KERNEL_DEATH) {
		printk(KERN_CRIT "%s() recursion detected.\n", __func__);
		local_irq_enable();
		while (1);
	}
	current->thread.flags |= PARISC_KERNEL_DEATH;

	show_regs(regs);
	dump_stack();
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
		ssleep(5);
		panic("Fatal exception");
	}

	oops_exit();
	do_exit(SIGSEGV);
}

/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004
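/*
 * In this encoding the low five bits of the break instruction hold the
 * first operand (4) and the 13-bit field starting at bit 13 holds the
 * second (8), matching the field extraction in handle_break() below.
 */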
static void handle_gdb_break(struct pt_regs *regs, int wot)
{
	struct siginfo si;

	si.si_signo = SIGTRAP;
	si.si_errno = 0;
	si.si_code = wot;
	si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
	force_sig_info(SIGTRAP, &si, current);
}

static void handle_break(struct pt_regs *regs)
{
	unsigned iir = regs->iir;

	if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
		/* check if a BUG() or WARN() trapped here. */
		enum bug_trap_type tt;
		tt = report_bug(regs->iaoq[0] & ~3, regs);
		if (tt == BUG_TRAP_TYPE_WARN) {
			regs->iaoq[0] += 4;
			regs->iaoq[1] += 4;
			return; /* return to next instruction when WARN_ON(). */
		}
		die_if_kernel("Unknown kernel breakpoint", regs,
			(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
	}

	if (unlikely(iir != GDB_BREAK_INSN))
		parisc_printk_ratelimited(0, regs,
			KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
			iir & 31, (iir>>13) & ((1<<13)-1),
			task_pid_nr(current), current->comm);

	/* send standard GDB signal */
	handle_gdb_break(regs, TRAP_BRKPT);
}

static void default_trap(int code, struct pt_regs *regs)
{
	printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
	show_regs(regs);
}

void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;


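/*
 * After an HPMC, firmware has saved the interrupted context in PIM
 * (Processor Internal Memory). The function below copies that saved
 * state into a struct pt_regs, using the wide (PA 2.0) or narrow
 * (PA 1.1) PIM layout depending on the CPU type, so the normal dump
 * and unwind paths can operate on it.
 */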
void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
	register int i;
	extern unsigned int hpmc_pim_data[];
	struct pdc_hpmc_pim_11 *pim_narrow;
	struct pdc_hpmc_pim_20 *pim_wide;

	if (boot_cpu_data.cpu_type >= pcxu) {

		pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;

		/*
		 * Note: The following code will probably generate a
		 * bunch of truncation error warnings from the compiler.
		 * Could be handled with an ifdef, but perhaps there
		 * is a better way.
		 */

		regs->gr[0] = pim_wide->cr[22];

		for (i = 1; i < 32; i++)
			regs->gr[i] = pim_wide->gr[i];

		for (i = 0; i < 32; i++)
			regs->fr[i] = pim_wide->fr[i];

		for (i = 0; i < 8; i++)
			regs->sr[i] = pim_wide->sr[i];

		regs->iasq[0] = pim_wide->cr[17];
		regs->iasq[1] = pim_wide->iasq_back;
		regs->iaoq[0] = pim_wide->cr[18];
		regs->iaoq[1] = pim_wide->iaoq_back;

		regs->sar = pim_wide->cr[11];
		regs->iir = pim_wide->cr[19];
		regs->isr = pim_wide->cr[20];
		regs->ior = pim_wide->cr[21];
	}
	else {
		pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;

		regs->gr[0] = pim_narrow->cr[22];

		for (i = 1; i < 32; i++)
			regs->gr[i] = pim_narrow->gr[i];

		for (i = 0; i < 32; i++)
			regs->fr[i] = pim_narrow->fr[i];

		for (i = 0; i < 8; i++)
			regs->sr[i] = pim_narrow->sr[i];

		regs->iasq[0] = pim_narrow->cr[17];
		regs->iasq[1] = pim_narrow->iasq_back;
		regs->iaoq[0] = pim_narrow->cr[18];
		regs->iaoq[1] = pim_narrow->iaoq_back;

		regs->sar = pim_narrow->cr[11];
		regs->iir = pim_narrow->cr[19];
		regs->isr = pim_narrow->cr[20];
		regs->ior = pim_narrow->cr[21];
	}

	/*
	 * The following fields only have meaning if we came through
	 * another path. So just zero them here.
	 */

	regs->ksp = 0;
	regs->kpc = 0;
	regs->orig_r28 = 0;
}


/*
 * This routine is called as a last resort when everything else
 * has gone clearly wrong. We get called for faults in kernel space,
 * and HPMC's.
 */
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
	static DEFINE_SPINLOCK(terminate_lock);

	oops_in_progress = 1;

	set_eiem(0);
	local_irq_disable();
	spin_lock(&terminate_lock);

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* restart pdc console if necessary */
	if (!console_drivers)
		pdc_console_restart();

	/* Not all paths will gutter the processor... */
	switch (code) {

	case 1:
		transfer_pim_to_trap_frame(regs);
		break;

	default:
		/* Fall through */
		break;

	}

	{
		/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
		struct unwind_frame_info info;
		unwind_frame_init(&info, current, regs);
		do_show_stack(&info);
	}

	printk("\n");
	printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
		msg, code, regs, offset);
	show_regs(regs);

	spin_unlock(&terminate_lock);

	/* put soft power button back under hardware control;
	 * if the user had pressed it once at any time, the
	 * system will shut down immediately right here. */
	pdc_soft_power_button(0);

	/* Call kernel panic() so reboot timeouts work properly
	 * FIXME: This function should be on the list of
	 * panic notifiers, and we should call panic
	 * directly from the location that we wish.
	 * e.g. We should not call panic from
	 * parisc_terminate, but rather the other way around.
	 * This hack works, prints the panic message twice,
	 * and it enables reboot timers!
	 */
	panic(msg);
}

void notrace handle_interruption(int code, struct pt_regs *regs)
{
	unsigned long fault_address = 0;
	unsigned long fault_space = 0;
	struct siginfo si;

	if (code == 1)
		pdc_console_restart();  /* switch back to pdc if HPMC */
	else
		local_irq_enable();

	/* Security check:
	 * If the priority level is still user, and the
	 * faulting space is not equal to the active space
	 * then the user is attempting something in a space
	 * that does not belong to them. Kill the process.
	 *
	 * This is normally the situation when the user
	 * attempts to jump into the kernel space at the
	 * wrong offset, be it at the gateway page or a
	 * random location.
	 *
	 * We cannot normally signal the process because it
	 * could *be* on the gateway page, and processes
	 * executing on the gateway page can't have signals
	 * delivered.
	 *
	 * We merely readjust the address into the user's
	 * space, at a destination address of zero, and
	 * allow processing to continue.
	 */
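	/* Note: the low two bits of an IAOQ value carry the privilege
	 * level (0 = most privileged/kernel, 3 = user), which is what
	 * the iaoq[0] & 3 test below checks. */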
	if (((unsigned long)regs->iaoq[0] & 3) &&
	    ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
		/* Kill the user process later */
		regs->iaoq[0] = 0 | 3;
		regs->iaoq[1] = regs->iaoq[0] + 4;
		regs->iasq[0] = regs->iasq[1] = regs->sr[7];
		regs->gr[0] &= ~PSW_B;
		return;
	}

#if 0
	printk(KERN_CRIT "Interruption # %d\n", code);
#endif

	switch (code) {

	case 1:
		/* High-priority machine check (HPMC) */

		/* set up a new led state on systems shipped with a LED State panel */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);

		parisc_terminate("High Priority Machine Check (HPMC)",
				regs, code, 0);
		/* NOT REACHED */

	case 2:
		/* Power failure interrupt */
		printk(KERN_CRIT "Power failure interrupt !\n");
		return;

	case 3:
		/* Recovery counter trap */
		regs->gr[0] &= ~PSW_R;
		if (user_space(regs))
			handle_gdb_break(regs, TRAP_TRACE);
		/* else this must be the start of a syscall - just let it run */
		return;

	case 5:
		/* Low-priority machine check */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);

		flush_cache_all();
		flush_tlb_all();
		cpu_lpmc(5, regs);
		return;

	case 6:
		/* Instruction TLB miss fault/Instruction page fault */
		fault_address = regs->iaoq[0];
		fault_space = regs->iasq[0];
		break;

	case 8:
		/* Illegal instruction trap */
		die_if_kernel("Illegal instruction", regs, code);
		si.si_code = ILL_ILLOPC;
		goto give_sigill;

	case 9:
		/* Break instruction trap */
		handle_break(regs);
		return;

	case 10:
		/* Privileged operation trap */
		die_if_kernel("Privileged operation", regs, code);
		si.si_code = ILL_PRVOPC;
		goto give_sigill;

	case 11:
		/* Privileged register trap */
		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {

			/* This is a MFCTL cr26/cr27 to gr instruction.
			 * PCXS traps on this, so we need to emulate it.
			 */
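			/* In this encoding the 0x00200000 bit selects
			 * cr27 (set) vs. cr26 (clear), and the low five
			 * bits of the IIR name the destination general
			 * register, as the emulation below decodes. */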

			if (regs->iir & 0x00200000)
				regs->gr[regs->iir & 0x1f] = mfctl(27);
			else
				regs->gr[regs->iir & 0x1f] = mfctl(26);

			regs->iaoq[0] = regs->iaoq[1];
			regs->iaoq[1] += 4;
			regs->iasq[0] = regs->iasq[1];
			return;
		}

		die_if_kernel("Privileged register usage", regs, code);
		si.si_code = ILL_PRVREG;
	give_sigill:
		si.si_signo = SIGILL;
		si.si_errno = 0;
		si.si_addr = (void __user *) regs->iaoq[0];
		force_sig_info(SIGILL, &si, current);
		return;

	case 12:
		/* Overflow Trap, let the userland signal handler do the cleanup */
		si.si_signo = SIGFPE;
		si.si_code = FPE_INTOVF;
		si.si_addr = (void __user *) regs->iaoq[0];
		force_sig_info(SIGFPE, &si, current);
		return;

	case 13:
		/* Conditional Trap
		   The condition succeeds in an instruction which traps
		   on condition */
		if (user_mode(regs)) {
			si.si_signo = SIGFPE;
			/* Set to zero, and let the userspace app figure it out from
			   the insn pointed to by si_addr */
			si.si_code = 0;
			si.si_addr = (void __user *) regs->iaoq[0];
			force_sig_info(SIGFPE, &si, current);
			return;
		}
		/* The kernel doesn't want to handle condition codes */
		break;

	case 14:
		/* Assist Exception Trap, i.e. floating point exception. */
		die_if_kernel("Floating point exception", regs, 0); /* quiet */
		__inc_irq_stat(irq_fpassist_count);
		handle_fpe(regs);
		return;

	case 15:
		/* Data TLB miss fault/Data page fault */
		/* Fall through */
	case 16:
		/* Non-access instruction TLB miss fault */
		/* The instruction TLB entry needed for the target address of the FIC
		   is absent, and hardware can't find it, so we get to cleanup */
		/* Fall through */
	case 17:
		/* Non-access data TLB miss fault/Non-access data page fault */
		/* FIXME:
			 Still need to add slow path emulation code here!
			 If the insn used a non-shadow register, then the tlb
			 handlers could not have their side-effect (e.g. probe
			 writing to a target register) emulated since rfir would
			 erase the changes to said register. Instead we have to
			 setup everything, call this function we are in, and emulate
			 by hand. Technically we need to emulate:
			 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
		*/
		fault_address = regs->ior;
		fault_space = regs->isr;
		break;

	case 18:
		/* PCXS only -- later CPUs split this into types 26, 27 & 28 */
		/* Check for unaligned access */
		if (check_unaligned(regs)) {
			handle_unaligned(regs);
			return;
		}
		/* Fall through */
	case 26:
		/* PCXL: Data memory access rights trap */
		fault_address = regs->ior;
		fault_space = regs->isr;
		break;

	case 19:
		/* Data memory break trap */
		regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
		/* Fall through */
	case 21:
		/* Page reference trap */
		handle_gdb_break(regs, TRAP_HWBKPT);
		return;

	case 25:
		/* Taken branch trap */
		regs->gr[0] &= ~PSW_T;
		if (user_space(regs))
			handle_gdb_break(regs, TRAP_BRANCH);
		/* else this must be the start of a syscall - just let it
		 * run.
		 */
		return;

	case 7:
		/* Instruction access rights */
		/* PCXL: Instruction memory protection trap */

		/*
		 * This could be caused by either: 1) a process attempting
		 * to execute within a vma that does not have execute
		 * permission, or 2) an access rights violation caused by a
		 * flush only translation set up by ptep_get_and_clear().
		 * So we check the vma permissions to differentiate the two.
		 * If the vma indicates we have execute permission, then
		 * the cause is the latter one. In this case, we need to
		 * call do_page_fault() to fix the problem.
		 */

		if (user_mode(regs)) {
			struct vm_area_struct *vma;

			down_read(&current->mm->mmap_sem);
			vma = find_vma(current->mm, regs->iaoq[0]);
			if (vma && (regs->iaoq[0] >= vma->vm_start)
				&& (vma->vm_flags & VM_EXEC)) {

				fault_address = regs->iaoq[0];
				fault_space = regs->iasq[0];

				up_read(&current->mm->mmap_sem);
				break; /* call do_page_fault() */
			}
			up_read(&current->mm->mmap_sem);
		}
		/* Fall through */
	case 27:
		/* Data memory protection ID trap */
		if (code == 27 && !user_mode(regs) &&
			fixup_exception(regs))
			return;

		die_if_kernel("Protection id trap", regs, code);
		si.si_code = SEGV_MAPERR;
		si.si_signo = SIGSEGV;
		si.si_errno = 0;
		if (code == 7)
			si.si_addr = (void __user *) regs->iaoq[0];
		else
			si.si_addr = (void __user *) regs->ior;
		force_sig_info(SIGSEGV, &si, current);
		return;

	case 28:
		/* Unaligned data reference trap */
		handle_unaligned(regs);
		return;

	default:
		if (user_mode(regs)) {
			parisc_printk_ratelimited(0, regs, KERN_DEBUG
				"handle_interruption() pid=%d command='%s'\n",
				task_pid_nr(current), current->comm);
			/* SIGBUS, for lack of a better one. */
			si.si_signo = SIGBUS;
			si.si_code = BUS_OBJERR;
			si.si_errno = 0;
			si.si_addr = (void __user *) regs->ior;
			force_sig_info(SIGBUS, &si, current);
			return;
		}
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);

		parisc_terminate("Unexpected interruption", regs, code, 0);
		/* NOT REACHED */
	}

	if (user_mode(regs)) {
		if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
			parisc_printk_ratelimited(0, regs, KERN_DEBUG
				"User fault %d on space 0x%08lx, pid=%d command='%s'\n",
				code, fault_space,
				task_pid_nr(current), current->comm);
			si.si_signo = SIGSEGV;
			si.si_errno = 0;
			si.si_code = SEGV_MAPERR;
			si.si_addr = (void __user *) regs->ior;
			force_sig_info(SIGSEGV, &si, current);
			return;
		}
	}
	else {

		/*
		 * The kernel should never fault on its own address space,
		 * unless pagefault_disable() was called before.
		 */

		if (fault_space == 0 && !in_atomic())
		{
			pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
			parisc_terminate("Kernel Fault", regs, code, fault_address);
		}
	}

	do_page_fault(regs, code, fault_address);
}


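/*
 * check_ivt() verifies the "cows can fly" marker at the start of the
 * fault vector, clears the first eight words of the interruption vector
 * table, records the HPMC handler length in word 7, and stores the
 * negated sum of the handler plus those eight words in word 5, so that
 * the handler and header together checksum to zero.
 */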
int __init check_ivt(void *iva)
{
	extern u32 os_hpmc_size;
	extern const u32 os_hpmc[];

	int i;
	u32 check = 0;
	u32 *ivap;
	u32 *hpmcp;
	u32 length;

	if (strcmp((char *)iva, "cows can fly"))
		return -1;

	ivap = (u32 *)iva;

	for (i = 0; i < 8; i++)
		*ivap++ = 0;

	/* Compute Checksum for HPMC handler */
	length = os_hpmc_size;
	ivap[7] = length;

	hpmcp = (u32 *)os_hpmc;

	for (i = 0; i < length/4; i++)
		check += *hpmcp++;

	for (i = 0; i < 8; i++)
		check += ivap[i];

	ivap[5] = -check;

	return 0;
}

#ifndef CONFIG_64BIT
extern const void fault_vector_11;
#endif
extern const void fault_vector_20;

void __init trap_init(void)
{
	void *iva;

	if (boot_cpu_data.cpu_type >= pcxu)
		iva = (void *) &fault_vector_20;
	else
#ifdef CONFIG_64BIT
		panic("Can't boot 64-bit OS on PA1.1 processor!");
#else
		iva = (void *) &fault_vector_11;
#endif

	if (check_ivt(iva))
		panic("IVT invalid");
}