arch/sparc/mm/fault_32.c
/*
 * fault.c: Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/traps.h>
#include <asm/uaccess.h>

#include "mm_32.h"

int show_unhandled_signals = 1;

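/*
 * Report an unrecoverable fault: print the bad address plus the
 * current context and page-table root, then die via die_if_kernel().
 * Marked __noreturn; it never comes back to the caller.
 */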
static void __noreturn unhandled_fault(unsigned long address,
				       struct task_struct *tsk,
				       struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		       "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",
		       address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
	       (tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
	       (tsk->mm ? (unsigned long) tsk->mm->pgd :
		(unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}

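/*
 * Called from assembler stubs when a fault hits one of the kernel copy
 * routines.  search_extables_range() classifies the faulting PC:
 * 3 means both the load and the store side have fixups, 1 means only
 * the store side does (the _to_ macros), 2 means only the load side
 * does (the _from_ macros).  Bit 21 of the instruction word tells a
 * store from a load, with SWAP (op3 == 0x0f) treated as a load here;
 * anything that cannot be fixed up falls through to unhandled_fault().
 */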
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
			    unsigned long address)
{
	struct pt_regs regs;
	unsigned long g2;
	unsigned int insn;
	int i;

	i = search_extables_range(ret_pc, &g2);
	switch (i) {
	case 3:
		/* load & store will be handled by fixup */
		return 3;

	case 1:
		/* store will be handled by fixup, load will bump out */
		/* for _to_ macros */
		insn = *((unsigned int *) pc);
		if ((insn >> 21) & 1)
			return 1;
		break;

	case 2:
		/* load will be handled by fixup, store will bump out */
		/* for _from_ macros */
		insn = *((unsigned int *) pc);
		if (!((insn >> 21) & 1) || ((insn >> 19) & 0x3f) == 15)
			return 2;
		break;

	default:
		break;
	}

	memset(&regs, 0, sizeof(regs));
	regs.pc = pc;
	regs.npc = pc + 4;
	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n" : "=r" (regs.psr));
	unhandled_fault(address, current, &regs);

	/* Not reached */
	return 0;
}

static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->pc);

	printk(KERN_CONT "\n");
}

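/*
 * Build the siginfo for a fault and force-deliver the signal to the
 * current task, optionally logging an unhandled-signal message first.
 */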
static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			       unsigned long addr)
{
	siginfo_t info;

	info.si_signo = sig;
	info.si_code = code;
	info.si_errno = 0;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;

	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, info.si_code,
				addr, current);

	force_sig_info(sig, &info, current);
}

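/*
 * Work out the address to report in si_addr.  For a text fault the
 * faulting PC itself is the bad address; for a data fault we fetch the
 * instruction (directly in kernel mode, via __get_user() in user mode)
 * and decode its effective address.
 */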
static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
	unsigned int insn;

	if (text_fault)
		return regs->pc;

	if (regs->psr & PSR_PS)
		insn = *(unsigned int *) regs->pc;
	else
		__get_user(insn, (unsigned int *) regs->pc);

	return safe_compute_effective_address(regs, insn);
}

static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
				      int text_fault)
{
	unsigned long addr = compute_si_addr(regs, text_fault);

	__do_fault_siginfo(code, sig, regs, addr);
}

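/*
 * Main entry point for MMU faults, called from the low-level trap
 * entry code.  text_fault says the fault was on an instruction fetch
 * (in which case regs->pc, not 'address', is the faulting address),
 * and write says the faulting access was a store.
 */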
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int fixup;
	unsigned long g2;
	int from_user = !(regs->psr & PSR_PS);
	int fault, code;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	if (text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	code = SEGV_MAPERR;
	if (address >= TASK_SIZE)
		goto vmalloc_fault;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

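	/*
	 * handle_mm_fault() may drop mmap_sem while waiting on page I/O
	 * and return VM_FAULT_RETRY; in that case we come back here and
	 * take the semaphore again for a second, non-retryable attempt.
	 */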
retry:
	down_read(&mm->mmap_sem);

	if (!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	if (from_user)
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

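	/*
	 * If a fatal signal arrived while the fault was being retried,
	 * mmap_sem has already been dropped by __lock_page_or_retry(),
	 * so just return and let the signal be delivered.
	 */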
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			current->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, address);
		} else {
			current->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (from_user) {
		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
		return;
	}

	/* Is this in ex_table? */
no_context:
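	/*
	 * Kernel-mode fault: if the faulting PC has an exception-table
	 * fixup, branch there instead of dying.  The memset and
	 * checksumming copy routines additionally expect the fault
	 * address and faulting PC in %i4/%i5 so their fixup handlers
	 * can resume.
	 */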
	g2 = regs->u_regs[UREG_G2];
	if (!from_user) {
		fixup = search_extables_range(regs->pc, &g2);
		/* Values below 10 are reserved for other things */
		if (fixup > 10) {
			extern const unsigned __memset_start[];
			extern const unsigned __memset_end[];
			extern const unsigned __csum_partial_copy_start[];
			extern const unsigned __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n",
			       regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
			       regs->pc, fixup, g2);
#endif
			if ((regs->pc >= (unsigned long)__memset_start &&
			     regs->pc < (unsigned long)__memset_end) ||
			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
				regs->u_regs[UREG_I4] = address;
				regs->u_regs[UREG_I5] = regs->pc;
			}
			regs->u_regs[UREG_G2] = g2;
			regs->pc = fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}

	unhandled_fault(address, tsk, regs);
	do_exit(SIGKILL);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (from_user) {
		pagefault_out_of_memory();
		return;
	}
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
	if (!from_user)
		goto no_context;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);

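		/*
		 * If our pmd is already present this wasn't a missing
		 * kernel mapping after all, and if the reference pmd is
		 * absent the vmalloc area doesn't cover this address:
		 * either way it is an unhandled fault.
		 */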
		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		*pmd = *pmd_k;
		return;
	}
}

/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int flags = FAULT_FLAG_USER;
	int code;

	code = SEGV_MAPERR;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
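	/*
	 * No retry handling here: this path runs synchronously with
	 * the register-window handlers below, so any hard failure
	 * (including OOM) is simply reported as SIGBUS.
	 */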
	switch (handle_mm_fault(mm, vma, address, flags)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	up_read(&mm->mmap_sem);
	return;
bad_area:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}

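/*
 * The handlers below fault in the user stack pages touched by a
 * register-window spill or fill.  A window save area spans 16 words,
 * written with doubleword stores of which the last lands at sp + 0x38;
 * when sp and sp + 0x38 fall on different pages, both pages must be
 * made present before the window can be stored or reloaded.
 */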
static void check_stack_aligned(unsigned long sp)
{
	if (sp & 0x7UL)
		force_sig(SIGILL, current);
}

void window_overflow_fault(void)
{
	unsigned long sp;

	sp = current_thread_info()->rwbuf_stkptrs[0];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 1);
	force_user_fault(sp, 1);

	check_stack_aligned(sp);
}

void window_underflow_fault(unsigned long sp)
{
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}

void window_ret_fault(struct pt_regs *regs)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}