Commit | Line | Data |
---|---|---|
88278ca2 | 1 | /* |
1da177e4 LT |
2 | * fault.c: Page fault handlers for the Sparc. |
3 | * | |
4 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | |
5 | * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) | |
6 | * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | |
7 | */ | |
8 | ||
9 | #include <asm/head.h> | |
10 | ||
11 | #include <linux/string.h> | |
12 | #include <linux/types.h> | |
13 | #include <linux/sched.h> | |
14 | #include <linux/ptrace.h> | |
15 | #include <linux/mman.h> | |
16 | #include <linux/threads.h> | |
17 | #include <linux/kernel.h> | |
18 | #include <linux/signal.h> | |
19 | #include <linux/mm.h> | |
20 | #include <linux/smp.h> | |
a084b667 | 21 | #include <linux/perf_event.h> |
1da177e4 | 22 | #include <linux/interrupt.h> |
1eeb66a1 | 23 | #include <linux/kdebug.h> |
1da177e4 | 24 | |
1da177e4 LT |
25 | #include <asm/page.h> |
26 | #include <asm/pgtable.h> | |
1da177e4 LT |
27 | #include <asm/openprom.h> |
28 | #include <asm/oplib.h> | |
29 | #include <asm/smp.h> | |
30 | #include <asm/traps.h> | |
1da177e4 LT |
31 | #include <asm/uaccess.h> |
32 | ||
/* PROM device-tree root node handle, set up elsewhere at boot. */
extern int prom_node_root;

/* When non-zero, print a rate-limited diagnostic line for unhandled
 * user-space faults (see show_signal_msg()).
 */
int show_unhandled_signals = 1;

/* At boot time we determine these two values necessary for setting
 * up the segment maps and page table entries (pte's).
 */

int num_segmaps, num_contexts;
int invalid_segment;

/* various Virtual Address Cache parameters we find at boot time... */

int vac_size, vac_linesize, vac_do_hw_vac_flushes;
int vac_entries_per_context, vac_entries_per_segment;
int vac_entries_per_page;
9f2b2a5f DM |
50 | /* Return how much physical memory we have. */ |
51 | unsigned long probe_memory(void) | |
1da177e4 | 52 | { |
9f2b2a5f DM |
53 | unsigned long total = 0; |
54 | int i; | |
1da177e4 | 55 | |
9f2b2a5f DM |
56 | for (i = 0; sp_banks[i].num_bytes; i++) |
57 | total += sp_banks[i].num_bytes; | |
1da177e4 | 58 | |
1da177e4 LT |
59 | return total; |
60 | } | |
61 | ||
/* Whee, a level 15 NMI interrupt memory error. Let's have fun... */
asmlinkage void sparc_lvl15_nmi(struct pt_regs *regs, unsigned long serr,
				unsigned long svaddr, unsigned long aerr,
				unsigned long avaddr)
{
	/* Dump the synchronous/asynchronous error registers and fault
	 * addresses handed to us by the trap entry code, then the full
	 * CPU register state, and drop back into the PROM: this error
	 * is treated as unrecoverable (prom_halt() does not return).
	 */
	printk("FAULT: NMI received\n");
	printk("SREGS: Synchronous Error %08lx\n", serr);
	printk(" Synchronous Vaddr %08lx\n", svaddr);
	printk(" Asynchronous Error %08lx\n", aerr);
	printk(" Asynchronous Vaddr %08lx\n", avaddr);
	printk("REGISTER DUMP:\n");
	show_regs(regs);
	prom_halt();
}
76 | ||
static void unhandled_fault(unsigned long, struct task_struct *,
		struct pt_regs *) __attribute__ ((noreturn));

/* Report an unrecoverable fault: distinguish a NULL-pointer dereference
 * (address within the first page) from a general bad paging request,
 * dump the faulting task's mm context and pgd, then oops if we were in
 * kernel mode.  Declared noreturn above.
 */
static void unhandled_fault(unsigned long address, struct task_struct *tsk,
		struct pt_regs *regs)
{
	if((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		    "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request "
		       "at virtual address %08lx\n", address);
	}
	/* Kernel threads have no mm of their own; fall back to the
	 * borrowed active_mm in that case.
	 */
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
		(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
		(tsk->mm ? (unsigned long) tsk->mm->pgd :
			(unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}
97 | ||
/* Called for faults taken inside copy routines: consult the exception
 * table for ret_pc and classify how the fault should be fixed up.
 * Returns the fixup class (1, 2 or 3) when a fixup applies; if none
 * does, fabricates a pt_regs and dies via unhandled_fault().
 */
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
			    unsigned long address)
{
	struct pt_regs regs;
	unsigned long g2;
	unsigned int insn;
	int i;

	i = search_extables_range(ret_pc, &g2);
	switch (i) {
	case 3:
		/* load & store will be handled by fixup */
		return 3;

	case 1:
		/* store will be handled by fixup, load will bump out */
		/* for _to_ macros */
		/* NOTE(review): bit 21 of the faulting opcode appears to
		 * select the store direction here — confirm against the
		 * SPARC V8 load/store instruction encoding.
		 */
		insn = *((unsigned int *) pc);
		if ((insn >> 21) & 1)
			return 1;
		break;

	case 2:
		/* load will be handled by fixup, store will bump out */
		/* for _from_ macros */
		insn = *((unsigned int *) pc);
		if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15)
			return 2;
		break;

	default:
		break;
	}

	/* No fixup applies: build a minimal register frame (pc/npc and
	 * the live PSR read via inline asm) and report the fault.
	 */
	memset(&regs, 0, sizeof (regs));
	regs.pc = pc;
	regs.npc = pc + 4;
	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n" : "=r" (regs.psr));
	unhandled_fault(address, current, &regs);

	/* Not reached */
	return 0;
}
145 | ||
/* Print a one-line, rate-limited diagnostic for a fault that delivers
 * an unhandled signal to a user task: command, pid, fault address, pc,
 * return pc, stack pointer and si_code.  Silent when the task handles
 * the signal or when printk is being rate-limited.
 */
static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	/* The loglevel is passed as the leading %s: a crashing init
	 * (pid 1) is escalated to KERN_EMERG, everything else is
	 * KERN_INFO.
	 */
	printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->pc);

	printk(KERN_CONT "\n");
}
166 | ||
167 | static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs, | |
168 | unsigned long addr) | |
169 | { | |
170 | siginfo_t info; | |
171 | ||
172 | info.si_signo = sig; | |
173 | info.si_code = code; | |
174 | info.si_errno = 0; | |
175 | info.si_addr = (void __user *) addr; | |
176 | info.si_trapno = 0; | |
177 | ||
178 | if (unlikely(show_unhandled_signals)) | |
179 | show_signal_msg(regs, sig, info.si_code, | |
180 | addr, current); | |
181 | ||
182 | force_sig_info (sig, &info, current); | |
183 | } | |
184 | ||
extern unsigned long safe_compute_effective_address(struct pt_regs *,
						    unsigned int);

/* Work out the virtual address to report in siginfo.  For a text fault
 * that is simply the pc; for a data fault the faulting instruction is
 * fetched and decoded to recover its effective address.
 */
static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
	unsigned int insn;

	if (text_fault)
		return regs->pc;

	if (regs->psr & PSR_PS) {
		/* Supervisor-mode fault: pc is a kernel address we can
		 * dereference directly.
		 */
		insn = *(unsigned int *) regs->pc;
	} else {
		/* NOTE(review): the __get_user() result is ignored, so
		 * insn may be used uninitialized if the user pc is
		 * unreadable — confirm this is acceptable here.
		 */
		__get_user(insn, (unsigned int *) regs->pc);
	}

	return safe_compute_effective_address(regs, insn);
}
203 | ||
4b177647 DM |
204 | static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs, |
205 | int text_fault) | |
206 | { | |
207 | unsigned long addr = compute_si_addr(regs, text_fault); | |
208 | ||
209 | __do_fault_siginfo(code, sig, regs, addr); | |
210 | } | |
211 | ||
/* Main page-fault entry point for sparc32, called from the trap code.
 * @text_fault: instruction fetch fault (address taken from pc).
 * @write: fault was caused by a write access.
 * @address: faulting virtual address (ignored for text faults).
 */
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int fixup;
	unsigned long g2;
	/* PSR_PS set means the trap was taken in supervisor mode. */
	int from_user = !(regs->psr & PSR_PS);
	int fault, code;
	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
			      (write ? FAULT_FLAG_WRITE : 0));

	/* For instruction faults the faulting address is the pc itself. */
	if(text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	code = SEGV_MAPERR;
	if (!ARCH_SUN4C && address >= TASK_SIZE)
		goto vmalloc_fault;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
	down_read(&mm->mmap_sem);

	/*
	 * The kernel referencing a bad kernel pointer can lock up
	 * a sun4c machine completely, so we must attempt recovery.
	 */
	if(!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if(!vma)
		goto bad_area;
	if(vma->vm_start <= address)
		goto good_area;
	if(!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if(expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	code = SEGV_ACCERR;
	if(write) {
		if(!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	/* A retry-with-fatal-signal means __lock_page_or_retry() already
	 * dropped mmap_sem for us, so we can just return.
	 * NOTE(review): this bails out even for kernel-mode faults
	 * without running the exception-table fixup — confirm that is
	 * intended.
	 */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/* Account the fault to the task and to perf. */
		if (fault & VM_FAULT_MAJOR) {
			current->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, address);
		} else {
			current->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			/* Clear ALLOW_RETRY so we only retry once. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (from_user) {
		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
		return;
	}

	/* Is this in ex_table? */
no_context:
	g2 = regs->u_regs[UREG_G2];
	if (!from_user) {
		fixup = search_extables_range(regs->pc, &g2);
		if (fixup > 10) { /* Values below are reserved for other things */
			extern const unsigned __memset_start[];
			extern const unsigned __memset_end[];
			extern const unsigned __csum_partial_copy_start[];
			extern const unsigned __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n", regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
				regs->pc, fixup, g2);
#endif
			/* Faults inside memset/csum_partial_copy also get
			 * the fault address and pc stashed in %i4/%i5 for
			 * their fixup handlers.
			 */
			if ((regs->pc >= (unsigned long)__memset_start &&
			     regs->pc < (unsigned long)__memset_end) ||
			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
				regs->u_regs[UREG_I4] = address;
				regs->u_regs[UREG_I5] = regs->pc;
			}
			regs->u_regs[UREG_G2] = g2;
			regs->pc = fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}

	unhandled_fault (address, tsk, regs);
	do_exit(SIGKILL);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (from_user) {
		pagefault_out_of_memory();
		return;
	}
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
	if (!from_user)
		goto no_context;
	/* NOTE(review): a user-mode SIGBUS falls through into the
	 * vmalloc_fault pgd-sync block below — looks deliberate but
	 * worth confirming.
	 */

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);

		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
		*pmd = *pmd_k;
		return;
	}
}
421 | ||
/* This always deals with user addresses. */
/* Fault in a user page at @address (for write access when @write is
 * set), delivering SIGSEGV/SIGBUS directly on failure.  Used by the
 * register-window fault handlers below to make the stack resident.
 */
static void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code;

	code = SEGV_MAPERR;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if(!vma)
		goto bad_area;
	if(vma->vm_start <= address)
		goto good_area;
	if(!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if(expand_stack(vma, address))
		goto bad_area;
good_area:
	code = SEGV_ACCERR;
	if(write) {
		if(!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	/* NOTE(review): handle_mm_fault() returns a bitmask; this switch
	 * matches only the exact values VM_FAULT_SIGBUS/VM_FAULT_OOM and
	 * would miss combined flag values — confirm against the
	 * VM_FAULT_* definitions for this kernel version.
	 */
	switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	up_read(&mm->mmap_sem);
	return;
bad_area:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}
467 | ||
9088333e DM |
468 | static void check_stack_aligned(unsigned long sp) |
469 | { | |
470 | if (sp & 0x7UL) | |
471 | force_sig(SIGILL, current); | |
472 | } | |
473 | ||
1da177e4 LT |
474 | void window_overflow_fault(void) |
475 | { | |
476 | unsigned long sp; | |
477 | ||
478 | sp = current_thread_info()->rwbuf_stkptrs[0]; | |
479 | if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK)) | |
480 | force_user_fault(sp + 0x38, 1); | |
481 | force_user_fault(sp, 1); | |
9088333e DM |
482 | |
483 | check_stack_aligned(sp); | |
1da177e4 LT |
484 | } |
485 | ||
486 | void window_underflow_fault(unsigned long sp) | |
487 | { | |
488 | if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK)) | |
489 | force_user_fault(sp + 0x38, 0); | |
490 | force_user_fault(sp, 0); | |
9088333e DM |
491 | |
492 | check_stack_aligned(sp); | |
1da177e4 LT |
493 | } |
494 | ||
495 | void window_ret_fault(struct pt_regs *regs) | |
496 | { | |
497 | unsigned long sp; | |
498 | ||
499 | sp = regs->u_regs[UREG_FP]; | |
500 | if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK)) | |
501 | force_user_fault(sp + 0x38, 0); | |
502 | force_user_fault(sp, 0); | |
9088333e DM |
503 | |
504 | check_stack_aligned(sp); | |
1da177e4 | 505 | } |