sparc32: Trivial removal of sun4c references in comments.
arch/sparc/mm/fault_32.c
/*
 * fault.c: Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/smp.h>
#include <asm/traps.h>
#include <asm/uaccess.h>

extern int prom_node_root;

int show_unhandled_signals = 1;

/* At boot time we determine these two values necessary for setting
 * up the segment maps and page table entries (pte's).
 */

int num_segmaps, num_contexts;
int invalid_segment;

/* various Virtual Address Cache parameters we find at boot time... */

int vac_size, vac_linesize, vac_do_hw_vac_flushes;
int vac_entries_per_context, vac_entries_per_segment;
int vac_entries_per_page;

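/* sp_banks[] is set up earlier at boot; the list is terminated by an
 * entry with num_bytes == 0.
 */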
/* Return how much physical memory we have. */
unsigned long probe_memory(void)
{
	unsigned long total = 0;
	int i;

	for (i = 0; sp_banks[i].num_bytes; i++)
		total += sp_banks[i].num_bytes;

	return total;
}

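/* Report a fault we have no way of fixing up: log the fault address
 * and the task's mm context/pgd, then die (via die_if_kernel) if we
 * were in kernel mode.
 */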
static void unhandled_fault(unsigned long, struct task_struct *,
		struct pt_regs *) __attribute__ ((noreturn));

static void unhandled_fault(unsigned long address, struct task_struct *tsk,
		struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		    "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request "
		       "at virtual address %08lx\n", address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
		(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
		(tsk->mm ? (unsigned long) tsk->mm->pgd :
		    (unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}

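/* Classify a fault taken inside one of the kernel's bulk memory
 * routines: search_extables_range() looks up the exception table
 * entry for the caller's return pc, and for the one-sided cases the
 * faulting instruction is decoded to see whether it is the load or
 * the store that the fixup covers.  Anything else is treated as an
 * unhandled fault.
 */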
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
			    unsigned long address)
{
	struct pt_regs regs;
	unsigned long g2;
	unsigned int insn;
	int i;

	i = search_extables_range(ret_pc, &g2);
	switch (i) {
	case 3:
		/* load & store will be handled by fixup */
		return 3;

	case 1:
		/* store will be handled by fixup, load will bump out */
		/* for _to_ macros */
		insn = *((unsigned int *) pc);
		if ((insn >> 21) & 1)
			return 1;
		break;

	case 2:
		/* load will be handled by fixup, store will bump out */
		/* for _from_ macros */
		insn = *((unsigned int *) pc);
		if (!((insn >> 21) & 1) || ((insn >> 19) & 0x3f) == 15)
			return 2;
		break;

	default:
		break;
	}

	memset(&regs, 0, sizeof(regs));
	regs.pc = pc;
	regs.npc = pc + 4;
	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n" : "=r" (regs.psr));
	unhandled_fault(address, current, &regs);

	/* Not reached */
	return 0;
}

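/* Print a rate-limited console message for a signal that the task has
 * no handler installed for.
 */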
static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->pc);

	printk(KERN_CONT "\n");
}

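/* Build the siginfo for a fault at @addr and force-deliver the signal
 * to the current task.
 */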
static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			       unsigned long addr)
{
	siginfo_t info;

	info.si_signo = sig;
	info.si_code = code;
	info.si_errno = 0;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;

	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, info.si_code,
				addr, current);

	force_sig_info(sig, &info, current);
}

extern unsigned long safe_compute_effective_address(struct pt_regs *,
						    unsigned int);

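/* Work out the si_addr to report: a text fault faults on the pc
 * itself; for a data fault we fetch the faulting instruction (via
 * __get_user() when the pc is a user address) and compute its
 * effective address.
 */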
static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
	unsigned int insn;

	if (text_fault)
		return regs->pc;

	if (regs->psr & PSR_PS)
		insn = *(unsigned int *) regs->pc;
	else
		__get_user(insn, (unsigned int *) regs->pc);

	return safe_compute_effective_address(regs, insn);
}

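/* Deliver a fault signal with si_addr computed from the trap state. */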
static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
				      int text_fault)
{
	unsigned long addr = compute_si_addr(regs, text_fault);

	__do_fault_siginfo(code, sig, regs, addr);
}

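/* Main fault entry point, called from the low-level trap handlers.
 * @text_fault marks an instruction-access fault (the fault address is
 * then the pc itself) and @write marks the faulting access as a
 * store.
 */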
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int fixup;
	unsigned long g2;
	int from_user = !(regs->psr & PSR_PS);
	int fault, code;
	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
			      (write ? FAULT_FLAG_WRITE : 0));

	if (text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	code = SEGV_MAPERR;
	if (address >= TASK_SIZE)
		goto vmalloc_fault;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
	down_read(&mm->mmap_sem);

	if (!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			current->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, address);
		} else {
			current->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (from_user) {
		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
		return;
	}

	/* Is this in ex_table? */
no_context:
	g2 = regs->u_regs[UREG_G2];
	if (!from_user) {
		fixup = search_extables_range(regs->pc, &g2);
		if (fixup > 10) { /* Values below are reserved for other things */
			extern const unsigned __memset_start[];
			extern const unsigned __memset_end[];
			extern const unsigned __csum_partial_copy_start[];
			extern const unsigned __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n",
			       regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
			       regs->pc, fixup, g2);
#endif
			/* Faults inside memset() and csum_partial_copy()
			 * additionally hand the fault address and faulting
			 * pc to the fixup in %i4/%i5.
			 */
			if ((regs->pc >= (unsigned long)__memset_start &&
			     regs->pc < (unsigned long)__memset_end) ||
			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
				regs->u_regs[UREG_I4] = address;
				regs->u_regs[UREG_I5] = regs->pc;
			}
			regs->u_regs[UREG_G2] = g2;
			regs->pc = fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}

	unhandled_fault(address, tsk, regs);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (from_user) {
		pagefault_out_of_memory();
		return;
	}
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
	if (!from_user)
		goto no_context;

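/* We branch here before mmap_sem is taken, which is why the failure
 * path below uses bad_area_nosemaphore.
 */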
vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);

		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
		*pmd = *pmd_k;
		return;
	}
}

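/* Fault in a user page by hand, on behalf of the register window
 * handlers below.
 */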
/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code;

	code = SEGV_MAPERR;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	up_read(&mm->mmap_sem);
	return;
bad_area:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}

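/* The stack pointer must be doubleword (8-byte) aligned on sparc; a
 * misaligned one gets the task a SIGILL.
 */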
static void check_stack_aligned(unsigned long sp)
{
	if (sp & 0x7UL)
		force_sig(SIGILL, current);
}

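/* A register window save area is 16 words (64 bytes) on the user
 * stack.  Each handler below faults in both pages when the save area
 * straddles a page boundary (sp + 0x38 is the start of its last
 * doubleword), then checks the stack pointer's alignment.
 */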
void window_overflow_fault(void)
{
	unsigned long sp;

	sp = current_thread_info()->rwbuf_stkptrs[0];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 1);
	force_user_fault(sp, 1);

	check_stack_aligned(sp);
}

void window_underflow_fault(unsigned long sp)
{
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}

void window_ret_fault(struct pt_regs *regs)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}