/*
 * Page fault handler for SH with an MMU.
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2003 - 2012 Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 *  Copyright (C) 1995 Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/kdebug.h>
#include <asm/io_trapped.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

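/*
 * Give any registered kprobe fault handler a chance to service a
 * kernel-mode fault before the normal page fault path runs. Returns
 * 1 if a kprobe handled the fault.
 */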
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

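/*
 * Fill in a siginfo describing the fault and deliver the signal to
 * the faulting task.
 */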
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;

	force_sig_info(si_signo, &info, tsk);
}

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
static void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (mm)
		pgd = mm->pgd;
	else
		pgd = get_TTB();

	printk(KERN_ALERT "pgd = %p\n", pgd);
	pgd += pgd_index(addr);
	printk(KERN_ALERT "[%08lx] *pgd=%0*Lx", addr,
	       sizeof(*pgd) * 2, (u64)pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			printk("(bad)");
			break;
		}

		pud = pud_offset(pgd, addr);
		if (PTRS_PER_PUD != 1)
			printk(", *pud=%0*Lx", sizeof(*pud) * 2,
			       (u64)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			printk("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			printk(", *pmd=%0*Lx", sizeof(*pmd) * 2,
			       (u64)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			printk("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_kernel(pmd, addr);
		printk(", *pte=%0*Lx", sizeof(*pte) * 2, (u64)pte_val(*pte));
	} while (0);

	printk("\n");
}

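/*
 * Copy the kernel (init_mm) pud/pmd entries covering 'address' into
 * the given page table so the vmalloc area becomes visible through it.
 * Returns the reference pmd, or NULL if there is nothing to sync.
 */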
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else {
		/*
		 * The page tables are fully synchronised so there must
		 * be another reason for the fault. Return NULL here to
		 * signal that we have not taken care of the fault.
		 */
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
		return NULL;
	}

	return pmd_k;
}

/*
 * Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd_k;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc/module/P3 area: */
	if (!(address >= P3SEG && address < P3_ADDR_MAX))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_k = get_TTB();
	pmd_k = vmalloc_sync_one(pgd_k, address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

static void
show_fault_oops(struct pt_regs *regs, unsigned long address)
{
	if (!oops_may_print())
		return;

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %08lx\n", address);
	printk(KERN_ALERT "PC:");
	printk_address(regs->pc, 1);

	show_pte(NULL, address);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long writeaccess,
	   unsigned long address)
{
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	if (handle_trapped_io(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	show_fault_oops(regs, address);

	die("Oops", regs, writeaccess);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}

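/*
 * Report a bad access. The *_nosemaphore variants are used when
 * mmap_sem is not held; __bad_area() drops mmap_sem before delivering
 * the signal.
 */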
static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long writeaccess,
		       unsigned long address, int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		force_sig_info_fault(SIGSEGV, si_code, address, tsk);

		return;
	}

	no_context(regs, writeaccess, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long writeaccess,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, writeaccess, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long writeaccess,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, writeaccess, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long writeaccess, unsigned long address)
{
	__bad_area(regs, writeaccess, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long writeaccess,
		      unsigned long address)
{
	__bad_area(regs, writeaccess, address, SEGV_ACCERR);
}

static void out_of_memory(void)
{
	/*
	 * We ran out of memory, call the OOM killer, and return the userspace
	 * (which will retry the fault, or kill us if we got oom-killed):
	 */
	up_read(&current->mm->mmap_sem);

	pagefault_out_of_memory();
}

static void
do_sigbus(struct pt_regs *regs, unsigned long writeaccess, unsigned long address)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!user_mode(regs))
		no_context(regs, writeaccess, address);

	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}

static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long writeaccess,
	       unsigned long address, unsigned int fault)
{
	/*
	 * Pagefault was interrupted by SIGKILL. We have no reason to
	 * continue pagefault.
	 */
	if (fatal_signal_pending(current)) {
		if (!(fault & VM_FAULT_RETRY))
			up_read(&current->mm->mmap_sem);
		if (!user_mode(regs))
			no_context(regs, writeaccess, address);
		return 1;
	}

	if (!(fault & VM_FAULT_ERROR))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!user_mode(regs)) {
			up_read(&current->mm->mmap_sem);
			no_context(regs, writeaccess, address);
			return 1;
		}

		out_of_memory();
	} else {
		if (fault & VM_FAULT_SIGBUS)
			do_sigbus(regs, writeaccess, address);
		else
			BUG();
	}

	return 1;
}

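/*
 * Check whether the vma's protection bits permit the attempted access;
 * returns non-zero if the access is not allowed.
 */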
static inline int access_error(int write, struct vm_area_struct *vma)
{
	if (write) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}

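/* Treat any address at or above TASK_SIZE as a kernel-space fault. */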
static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE;
}

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long writeaccess,
					unsigned long address)
{
	unsigned long vec;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int fault;
	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
			      (writeaccess ? FAULT_FLAG_WRITE : 0));

	tsk = current;
	mm = tsk->mm;
	vec = lookup_exception_vector();

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (vmalloc_fault(address) >= 0)
			return;
		if (notify_page_fault(regs, vec))
			return;

		bad_area_nosemaphore(regs, writeaccess, address);
		return;
	}

	if (unlikely(notify_page_fault(regs, vec)))
		return;

	/* Only enable interrupts if they were on before the fault */
	if ((regs->sr & SR_IMASK) != SR_IMASK)
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in an atomic region then we must not take the fault:
	 */
	if (unlikely(in_atomic() || !mm)) {
		bad_area_nosemaphore(regs, writeaccess, address);
		return;
	}

retry:
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, writeaccess, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, writeaccess, address);
		return;
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, writeaccess, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	if (unlikely(access_error(writeaccess, vma))) {
		bad_area_access_error(regs, writeaccess, address);
		return;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
		if (mm_fault_error(regs, writeaccess, address, fault))
			return;

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
}

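/*
 * Fast-path TLB miss handler: walk the page tables for the faulting
 * address and, if a valid translation already exists, mark it
 * young/dirty and load it, avoiding the do_page_fault() slow path.
 * Returns 0 if the miss was handled here, 1 otherwise.
 */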
/*
 * Called with interrupts disabled.
 */
asmlinkage int __kprobes
handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
	       unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	/*
	 * We don't take page faults for P1, P2, and parts of P4, these
	 * are always mapped, whether it be due to legacy behaviour in
	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
	 */
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);
	} else {
		if (unlikely(address >= TASK_SIZE || !current->mm))
			return 1;

		pgd = pgd_offset(current->mm, address);
	}

	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return 1;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;
	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		return 1;
	if (unlikely(writeaccess && !pte_write(entry)))
		return 1;

	if (writeaccess)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);

	set_pte(pte, entry);

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
	/*
	 * SH-4 does not set MMUCR.RC to the corresponding TLB entry in
	 * the case of an initial page write exception, so we need to
	 * flush it in order to avoid potential TLB entry duplication.
	 */
	if (writeaccess == 2)
		local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

	update_mmu_cache(NULL, address, pte);

	return 0;
}