tile: remove support for TILE64
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * From i386 code copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>

#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/traps.h>
#include <asm/syscalls.h>

#include <arch/interrupts.h>

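/*
 * Deliver the signal for an unhandled fault to the given task, or
 * panic instead if the target is init or the idle task, since killing
 * either of those would take down the system.
 */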
static noinline void force_sig_info_fault(const char *type, int si_signo,
					  int si_code, unsigned long address,
					  int fault_num,
					  struct task_struct *tsk,
					  struct pt_regs *regs)
{
	siginfo_t info;

	if (unlikely(tsk->pid < 2)) {
		panic("Signal %d (code %d) at %#lx sent to %s!",
		      si_signo, si_code & 0xffff, address,
		      is_idle_task(tsk) ? "the idle task" : "init");
	}

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;
	info.si_trapno = fault_num;
	trace_unhandled_signal(type, regs, address, si_signo);
	force_sig_info(si_signo, &info, tsk);
}

#ifndef __tilegx__
/*
 * Synthesize the fault a PL0 process would get by doing a word-load of
 * an unaligned address or a high kernel address.
 */
SYSCALL_DEFINE1(cmpxchg_badaddr, unsigned long, address)
{
	struct pt_regs *regs = current_pt_regs();

	if (address >= PAGE_OFFSET)
		force_sig_info_fault("atomic segfault", SIGSEGV, SEGV_MAPERR,
				     address, INT_DTLB_MISS, current, regs);
	else
		force_sig_info_fault("atomic alignment fault", SIGBUS,
				     BUS_ADRALN, address,
				     INT_UNALIGN_DATA, current, regs);

	/*
	 * Adjust pc to point at the actual instruction, which is unusual
	 * for syscalls normally, but is appropriate when we are claiming
	 * that a syscall swint1 caused a page fault or bus error.
	 */
	regs->pc -= 8;

	/*
	 * Mark this as a caller-save interrupt, like a normal page fault,
	 * so that when we go through the signal handler path we will
	 * properly restore r0, r1, and r2 for the signal handler arguments.
	 */
	regs->flags |= PT_FLAGS_CALLER_SAVES;

	return 0;
}
#endif

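/*
 * Copy the kernel mapping for 'address' from the reference page table
 * (init_mm.pgd) into the given pgd, returning the reference pmd, or
 * NULL if the kernel has no mapping there.
 */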
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;
	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else
		BUG_ON(pmd_ptfn(*pmd) != pmd_ptfn(*pmd_k));
	return pmd_k;
}

/*
 * Handle a fault on the vmalloc area.
 */
static inline int vmalloc_fault(pgd_t *pgd, unsigned long address)
{
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pmd_k = vmalloc_sync_one(pgd, address);
	if (!pmd_k)
		return -1;
	if (pmd_huge(*pmd_k))
		return 0;	/* support TILE huge_vmap() API */
	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;
	return 0;
}

/* Wait until this PTE has completed migration. */
static void wait_for_migration(pte_t *pte)
{
	if (pte_migrating(*pte)) {
		/*
		 * Wait until the migrator fixes up this pte.
		 * We scale the loop count by the clock rate so we'll wait for
		 * a few seconds here.
		 */
		int retries = 0;
		int bound = get_clock_rate();
		while (pte_migrating(*pte)) {
			barrier();
			if (++retries > bound)
				panic("Hit migrating PTE (%#llx) and"
				      " page PFN %#lx still migrating",
				      pte->val, pte_pfn(*pte));
		}
	}
}

/*
 * It's not generally safe to use "current" to get the page table pointer,
 * since we might be running an oprofile interrupt in the middle of a
 * task switch.
 */
static pgd_t *get_current_pgd(void)
{
	HV_Context ctx = hv_inquire_context();
	unsigned long pgd_pfn = ctx.page_table >> PAGE_SHIFT;
	struct page *pgd_page = pfn_to_page(pgd_pfn);
	BUG_ON(PageHighMem(pgd_page));
	return (pgd_t *) __va(ctx.page_table);
}

/*
 * We can receive a page fault from a migrating PTE at any time.
 * Handle it by just waiting until the fault resolves.
 *
 * It's also possible to get a migrating kernel PTE that resolves
 * itself during the downcall from hypervisor to Linux.  We just check
 * here to see if the PTE seems valid, and if so we retry it.
 *
 * NOTE! We MUST NOT take any locks for this case.  We may be in an
 * interrupt or a critical region, and must do as little as possible.
 * Similarly, we can't use atomic ops here, since we may be handling a
 * fault caused by an atomic op access.
 *
 * If we find a migrating PTE while we're in an NMI context, and we're
 * at a PC that has a registered exception handler, we don't wait,
 * since this thread may (e.g.) have been interrupted while migrating
 * its own stack, which would then cause us to self-deadlock.
 */
static int handle_migrating_pte(pgd_t *pgd, int fault_num,
				unsigned long address, unsigned long pc,
				int is_kernel_mode, int write)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;

	if (pgd_addr_invalid(address))
		return 0;

	pgd += pgd_index(address);
	pud = pud_offset(pgd, address);
	if (!pud || !pud_present(*pud))
		return 0;
	pmd = pmd_offset(pud, address);
	if (!pmd || !pmd_present(*pmd))
		return 0;
	pte = pmd_huge_page(*pmd) ? ((pte_t *)pmd) :
		pte_offset_kernel(pmd, address);
	pteval = *pte;
	if (pte_migrating(pteval)) {
		if (in_nmi() && search_exception_tables(pc))
			return 0;
		wait_for_migration(pte);
		return 1;
	}

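	/*
	 * For a kernel-mode fault, if the PTE is present and already
	 * grants the required access, we must have raced with a PTE
	 * update (e.g. a migration completing); report the fault as
	 * handled so the access is simply retried.
	 */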
	if (!is_kernel_mode || !pte_present(pteval))
		return 0;
	if (fault_num == INT_ITLB_MISS) {
		if (pte_exec(pteval))
			return 1;
	} else if (write) {
		if (pte_write(pteval))
			return 1;
	} else {
		if (pte_read(pteval))
			return 1;
	}

	return 0;
}

/*
 * This routine is responsible for faulting in user pages.
 * It passes the work off to one of the appropriate routines.
 * It returns true if the fault was successfully handled.
 */
static int handle_page_fault(struct pt_regs *regs,
			     int fault_num,
			     int is_page_fault,
			     unsigned long address,
			     int write)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long stack_offset;
	int fault;
	int si_code;
	int is_kernel_mode;
	pgd_t *pgd;
	unsigned int flags;

	/* on TILE, protection faults are always writes */
	if (!is_page_fault)
		write = 1;

	flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
		 (write ? FAULT_FLAG_WRITE : 0));

	is_kernel_mode = !user_mode(regs);

	tsk = validate_current();

	/*
	 * Check to see if we might be overwriting the stack, and bail
	 * out if so.  The page fault code is a relatively likely
	 * place to get trapped in an infinite regress, and once we
	 * overwrite the whole stack, it becomes very hard to recover.
	 */
	stack_offset = stack_pointer & (THREAD_SIZE-1);
	if (stack_offset < THREAD_SIZE / 8) {
		pr_alert("Potential stack overrun: sp %#lx\n",
			 stack_pointer);
		show_regs(regs);
		pr_alert("Killing current process %d/%s\n",
			 tsk->pid, tsk->comm);
		do_group_exit(SIGKILL);
	}

	/*
	 * Early on, we need to check for migrating PTE entries;
	 * see homecache.c.  If we find a migrating PTE, we wait until
	 * the backing page claims to be done migrating, then we proceed.
	 * For kernel PTEs, we rewrite the PTE and return and retry.
	 * Otherwise, we treat the fault like a normal "no PTE" fault,
	 * rather than trying to patch up the existing PTE.
	 */
	pgd = get_current_pgd();
	if (handle_migrating_pte(pgd, fault_num, address, regs->pc,
				 is_kernel_mode, write))
		return 1;

	si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was not a protection fault.
	 */
	if (unlikely(address >= TASK_SIZE &&
		     !is_arch_mappable_range(address, 0))) {
		if (is_kernel_mode && is_page_fault &&
		    vmalloc_fault(pgd, address) >= 0)
			return 1;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		mm = NULL;	/* happy compiler */
		vma = NULL;
		goto bad_area_nosemaphore;
	}

	/*
	 * If we're trying to touch user-space addresses, we must
	 * be either at PL0, or else with interrupts enabled in the
	 * kernel, so either way we can re-enable interrupts here
	 * unless we are doing atomic access to user space with
	 * interrupts disabled.
	 */
	if (!(regs->flags & PT_FLAGS_DISABLE_IRQ))
		local_irq_enable();

	mm = tsk->mm;

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault.
	 */
	if (in_atomic() || !mm) {
		vma = NULL;	/* happy compiler */
		goto bad_area_nosemaphore;
	}

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (is_kernel_mode &&
		    !search_exception_tables(regs->pc)) {
			vma = NULL;	/* happy compiler */
			goto bad_area_nosemaphore;
		}

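	/*
	 * The retry path re-enters below the trylock, so a retry after
	 * VM_FAULT_RETRY always takes the blocking down_read().
	 */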
retry:
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (regs->sp < PAGE_OFFSET) {
		/*
		 * accessing the stack below sp is always a bug.
		 */
		if (address < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	si_code = SEGV_ACCERR;
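	/* Verify the vma permits the type of access that faulted. */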
	if (fault_num == INT_ITLB_MISS) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (write) {
#ifdef TEST_VERIFY_AREA
		if (!is_page_fault && regs->cs == KERNEL_CS)
			pr_err("WP fault at "REGFMT"\n", regs->eip);
#endif
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!is_page_fault || !(vma->vm_flags & VM_READ))
			goto bad_area;
	}

survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return 0;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

#if CHIP_HAS_TILE_DMA()
	/* If this was a DMA TLB fault, restart the DMA engine. */
	switch (fault_num) {
	case INT_DMATLB_MISS:
	case INT_DMATLB_MISS_DWNCL:
	case INT_DMATLB_ACCESS:
	case INT_DMATLB_ACCESS_DWNCL:
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
		break;
	}
#endif

	up_read(&mm->mmap_sem);
	return 1;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (!is_kernel_mode) {
		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		force_sig_info_fault("segfault", SIGSEGV, si_code, address,
				     fault_num, tsk, regs);
		return 0;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return 0;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */

	bust_spinlocks(1);

	/* FIXME: no lookup_address() yet */
#ifdef SUPPORT_LOOKUP_ADDRESS
	if (fault_num == INT_ITLB_MISS) {
		pte_t *pte = lookup_address(address);

		if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
			pr_crit("kernel tried to execute"
				" non-executable page - exploit attempt?"
				" (uid: %d)\n", current->uid);
	}
#endif
	if (address < PAGE_SIZE)
		pr_alert("Unable to handle kernel NULL pointer dereference\n");
	else
		pr_alert("Unable to handle kernel paging request\n");
	pr_alert(" at virtual address "REGFMT", pc "REGFMT"\n",
		 address, regs->pc);

	show_regs(regs);

	if (unlikely(tsk->pid < 2)) {
		panic("Kernel page fault running %s!",
		      is_idle_task(tsk) ? "the idle task" : "init");
	}

	/*
	 * More FIXME: we should probably copy the i386 here and
	 * implement a generic die() routine.  Not today.
	 */
#ifdef SUPPORT_DIE
	die("Oops", regs);
#endif
	bust_spinlocks(0);

	do_group_exit(SIGKILL);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	if (is_kernel_mode)
		goto no_context;
	pagefault_out_of_memory();
	return 0;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (is_kernel_mode)
		goto no_context;

	force_sig_info_fault("bus error", SIGBUS, BUS_ADRERR, address,
			     fault_num, tsk, regs);
	return 0;
}

#ifndef __tilegx__

/* We must release ICS before panicking or we won't get anywhere. */
#define ics_panic(fmt, ...) do { \
	__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \
	panic(fmt, __VA_ARGS__); \
} while (0)

/*
 * When we take an ITLB or DTLB fault or access violation in the
 * supervisor while the critical section bit is set, the hypervisor is
 * reluctant to write new values into the EX_CONTEXT_K_x registers,
 * since that might indicate we have not yet squirreled the SPR
 * contents away, and thus cannot yet safely take a recursive interrupt.
 * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_K_2.
 *
 * Note that this routine is called before homecache_tlb_defer_enter(),
 * which means that we can properly unlock any atomics that might
 * be used there (good), but also means we must be very sensitive
 * to not touch any data structures that might be located in memory
 * that could migrate, as we could be entering the kernel on a dataplane
 * cpu that has been deferring kernel TLB updates.  This means, for
 * example, that we can't migrate init_mm or its pgd.
 */
struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
				      unsigned long address,
				      unsigned long info)
{
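	/* SYSTEM_SAVE_K_2 packs the faulting PC with the "is write" flag in bit 0. */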
	unsigned long pc = info & ~1;
	int write = info & 1;
	pgd_t *pgd = get_current_pgd();

	/* Retval is 1 at first since we will handle the fault fully. */
	struct intvec_state state = {
		do_page_fault, fault_num, address, write, 1
	};

	/* Validate that we are plausibly in the right routine. */
	if ((pc & 0x7) != 0 || pc < PAGE_OFFSET ||
	    (fault_num != INT_DTLB_MISS &&
	     fault_num != INT_DTLB_ACCESS)) {
		unsigned long old_pc = regs->pc;
		regs->pc = pc;
		ics_panic("Bad ICS page fault args:"
			  " old PC %#lx, fault %d/%d at %#lx\n",
			  old_pc, fault_num, write, address);
	}

	/* We might be faulting on a vmalloc page, so check that first. */
	if (fault_num != INT_DTLB_ACCESS && vmalloc_fault(pgd, address) >= 0)
		return state;

	/*
	 * If we faulted with ICS set in sys_cmpxchg, we are providing
	 * a user syscall service that should generate a signal on
	 * fault.  We didn't set up a kernel stack on initial entry to
	 * sys_cmpxchg, but instead had one set up by the fault, which
	 * (because sys_cmpxchg never releases ICS) came to us via the
	 * SYSTEM_SAVE_K_2 mechanism, and thus EX_CONTEXT_K_[01] are
	 * still referencing the original user code.  We release the
	 * atomic lock and rewrite pt_regs so that it appears that we
	 * came from user-space directly, and after we finish the
	 * fault we'll go back to user space and re-issue the swint.
	 * This way the backtrace information is correct if we need to
	 * emit a stack dump at any point while handling this.
	 *
	 * Must match register use in sys_cmpxchg().
	 */
	if (pc >= (unsigned long) sys_cmpxchg &&
	    pc < (unsigned long) __sys_cmpxchg_end) {
#ifdef CONFIG_SMP
		/* Don't unlock before we could have locked. */
		if (pc >= (unsigned long)__sys_cmpxchg_grab_lock) {
			int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
			__atomic_fault_unlock(lock_ptr);
		}
#endif
		regs->sp = regs->regs[27];
	}

	/*
	 * We can also fault in the atomic assembly, in which
	 * case we use the exception table to do the first-level fixup.
	 * We may re-fixup again in the real fault handler if it
	 * turns out the faulting address is just bad, and not,
	 * for example, migrating.
	 */
	else if (pc >= (unsigned long) __start_atomic_asm_code &&
		 pc < (unsigned long) __end_atomic_asm_code) {
		const struct exception_table_entry *fixup;
#ifdef CONFIG_SMP
		/* Unlock the atomic lock. */
		int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
		__atomic_fault_unlock(lock_ptr);
#endif
		fixup = search_exception_tables(pc);
		if (!fixup)
			ics_panic("ICS atomic fault not in table:"
				  " PC %#lx, fault %d", pc, fault_num);
		regs->pc = fixup->fixup;
		regs->ex1 = PL_ICS_EX1(KERNEL_PL, 0);
	}

	/*
	 * Now that we have released the atomic lock (if necessary),
	 * it's safe to spin if the PTE that caused the fault was migrating.
	 */
	if (fault_num == INT_DTLB_ACCESS)
		write = 1;
	if (handle_migrating_pte(pgd, fault_num, address, pc, 1, write))
		return state;

	/* Return zero so that we continue on with normal fault handling. */
	state.retval = 0;
	return state;
}

#endif /* !__tilegx__ */

/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to handle_page_fault() for normal
 * DTLB and ITLB issues, and for DMA faults when we are in user space.
 * For the latter, if we're in kernel mode, we just save the interrupt
 * away appropriately and return immediately.  We can't do page faults
 * for user code while in kernel mode.
 */
void do_page_fault(struct pt_regs *regs, int fault_num,
		   unsigned long address, unsigned long write)
{
	int is_page_fault;

#ifdef CONFIG_KPROBES
	/*
	 * This is to notify the fault handler of the kprobes.  The
	 * exception code is redundant as it is also carried in REGS,
	 * but we pass it anyhow.
	 */
	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
		       regs->faultnum, SIGSEGV) == NOTIFY_STOP)
		return;
#endif

#ifdef __tilegx__
	/*
	 * We don't need early do_page_fault_ics() support, since unlike
	 * Pro we don't need to worry about unlocking the atomic locks.
	 * There is only one current case in GX where we touch any memory
	 * under ICS other than our own kernel stack, and we handle that
	 * here.  (If we crash due to trying to touch our own stack,
	 * we're in too much trouble for C code to help out anyway.)
	 */
	if (write & ~1) {
		unsigned long pc = write & ~1;
		if (pc >= (unsigned long) __start_unalign_asm_code &&
		    pc < (unsigned long) __end_unalign_asm_code) {
			struct thread_info *ti = current_thread_info();
			/*
			 * Our EX_CONTEXT is still what it was from the
			 * initial unalign exception, but now we've faulted
			 * on the JIT page.  We would like to complete the
			 * page fault however is appropriate, and then retry
			 * the instruction that caused the unalign exception.
			 * Our state has been "corrupted" by setting the low
			 * bit in "sp", and stashing r0..r3 in the
			 * thread_info area, so we revert all of that, then
			 * continue as if this were a normal page fault.
			 */
			regs->sp &= ~1UL;
			regs->regs[0] = ti->unalign_jit_tmp[0];
			regs->regs[1] = ti->unalign_jit_tmp[1];
			regs->regs[2] = ti->unalign_jit_tmp[2];
			regs->regs[3] = ti->unalign_jit_tmp[3];
			write &= 1;
		} else {
			pr_alert("%s/%d: ICS set at page fault at %#lx: %#lx\n",
				 current->comm, current->pid, pc, address);
			show_regs(regs);
			do_group_exit(SIGKILL);
			return;
		}
	}
#else
	/* This case should have been handled by do_page_fault_ics(). */
	BUG_ON(write & ~1);
#endif

#if CHIP_HAS_TILE_DMA()
	/*
	 * If it's a DMA fault, suspend the transfer while we're
	 * handling the miss; we'll restart after it's handled.  If we
	 * don't suspend, it's possible that this process could swap
	 * out and back in, and restart the engine since the DMA is
	 * still 'running'.
	 */
	if (fault_num == INT_DMATLB_MISS ||
	    fault_num == INT_DMATLB_ACCESS ||
	    fault_num == INT_DMATLB_MISS_DWNCL ||
	    fault_num == INT_DMATLB_ACCESS_DWNCL) {
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);
		while (__insn_mfspr(SPR_DMA_USER_STATUS) &
		       SPR_DMA_STATUS__BUSY_MASK)
			;
	}
#endif

	/* Validate fault num and decide if this is a first-time page fault. */
	switch (fault_num) {
	case INT_ITLB_MISS:
	case INT_DTLB_MISS:
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_MISS:
	case INT_DMATLB_MISS_DWNCL:
#endif
		is_page_fault = 1;
		break;

	case INT_DTLB_ACCESS:
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_ACCESS:
	case INT_DMATLB_ACCESS_DWNCL:
#endif
		is_page_fault = 0;
		break;

	default:
		panic("Bad fault number %d in do_page_fault", fault_num);
	}

#if CHIP_HAS_TILE_DMA()
	if (!user_mode(regs)) {
		struct async_tlb *async;
		switch (fault_num) {
#if CHIP_HAS_TILE_DMA()
		case INT_DMATLB_MISS:
		case INT_DMATLB_ACCESS:
		case INT_DMATLB_MISS_DWNCL:
		case INT_DMATLB_ACCESS_DWNCL:
			async = &current->thread.dma_async_tlb;
			break;
#endif
		default:
			async = NULL;
		}
		if (async) {

			/*
			 * No vmalloc check required, so we can allow
			 * interrupts immediately at this point.
			 */
			local_irq_enable();

			set_thread_flag(TIF_ASYNC_TLB);
			if (async->fault_num != 0) {
				panic("Second async fault %d;"
				      " old fault was %d (%#lx/%ld)",
				      fault_num, async->fault_num,
				      address, write);
			}
			BUG_ON(fault_num == 0);
			async->fault_num = fault_num;
			async->is_fault = is_page_fault;
			async->is_write = write;
			async->address = address;
			return;
		}
	}
#endif

	handle_page_fault(regs, fault_num, is_page_fault, address, write);
}


#if CHIP_HAS_TILE_DMA()
/*
 * This routine effectively re-issues asynchronous page faults
 * when we are returning to user space.
 */
void do_async_page_fault(struct pt_regs *regs)
{
	struct async_tlb *async = &current->thread.dma_async_tlb;

	/*
	 * Clear thread flag early.  If we re-interrupt while processing
	 * code here, we will reset it and recall this routine before
	 * returning to user space.
	 */
	clear_thread_flag(TIF_ASYNC_TLB);

	if (async->fault_num) {
		/*
		 * Clear async->fault_num before calling the page-fault
		 * handler so that if we re-interrupt before returning
		 * from the function we have somewhere to put the
		 * information from the new interrupt.
		 */
		int fault_num = async->fault_num;
		async->fault_num = 0;
		handle_page_fault(regs, fault_num, async->is_fault,
				  async->address, async->is_write);
	}
}
#endif /* CHIP_HAS_TILE_DMA() */

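/*
 * Make sure every pgd in the system contains the kernel mappings from
 * the reference page table; on tilegx the top-level kernel pmds are
 * shared, so there is nothing to do beyond a compile-time check.
 */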
void vmalloc_sync_all(void)
{
#ifdef __tilegx__
	/* Currently all L1 kernel pmd's are static and shared. */
	BUILD_BUG_ON(pgd_index(VMALLOC_END - PAGE_SIZE) !=
		     pgd_index(VMALLOC_START));
#else
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * if undone).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = PAGE_OFFSET;
	unsigned long address;

	BUILD_BUG_ON(PAGE_OFFSET & ~PGDIR_MASK);
	for (address = start; address >= PAGE_OFFSET; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			unsigned long flags;
			struct list_head *pos;

			spin_lock_irqsave(&pgd_lock, flags);
			list_for_each(pos, &pgd_list)
				if (!vmalloc_sync_one(list_to_pgd(pos),
						      address)) {
					/* Must be at first entry in list. */
					BUG_ON(pos != pgd_list.next);
					break;
				}
			spin_unlock_irqrestore(&pgd_lock, flags);
			if (pos != pgd_list.next)
				set_bit(pgd_index(address), insync);
		}
		if (address == start && test_bit(pgd_index(address), insync))
			start = address + PGDIR_SIZE;
	}
#endif
}