/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * From i386 code copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/traps.h>
#include <asm/syscalls.h>

#include <arch/interrupts.h>

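/*
 * Deliver a fault signal with trap-specific siginfo to a user task.
 * If the signal would target init or the idle task, something has gone
 * badly wrong in the kernel, so panic instead of signaling.
 */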
static noinline void force_sig_info_fault(const char *type, int si_signo,
					  int si_code, unsigned long address,
					  int fault_num,
					  struct task_struct *tsk,
					  struct pt_regs *regs)
{
	siginfo_t info;

	if (unlikely(tsk->pid < 2)) {
		panic("Signal %d (code %d) at %#lx sent to %s!",
		      si_signo, si_code & 0xffff, address,
		      is_idle_task(tsk) ? "the idle task" : "init");
	}

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;
	info.si_trapno = fault_num;
	trace_unhandled_signal(type, regs, address, si_signo);
	force_sig_info(si_signo, &info, tsk);
}

#ifndef __tilegx__
/*
 * Synthesize the fault a PL0 process would get by doing a word-load of
 * an unaligned address or a high kernel address.
 */
SYSCALL_DEFINE2(cmpxchg_badaddr, unsigned long, address,
		struct pt_regs *, regs)
{
	if (address >= PAGE_OFFSET)
		force_sig_info_fault("atomic segfault", SIGSEGV, SEGV_MAPERR,
				     address, INT_DTLB_MISS, current, regs);
	else
		force_sig_info_fault("atomic alignment fault", SIGBUS,
				     BUS_ADRALN, address,
				     INT_UNALIGN_DATA, current, regs);

	/*
	 * Adjust pc to point at the actual instruction, which is unusual
	 * for syscalls normally, but is appropriate when we are claiming
	 * that a syscall swint1 caused a page fault or bus error.
	 */
	regs->pc -= 8;

	/*
	 * Mark this as a caller-save interrupt, like a normal page fault,
	 * so that when we go through the signal handler path we will
	 * properly restore r0, r1, and r2 for the signal handler arguments.
	 */
	regs->flags |= PT_FLAGS_CALLER_SAVES;

	return 0;
}
#endif

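/*
 * Copy the kernel-space pmd entry for "address" from the reference
 * page table (init_mm.pgd) into the supplied pgd, if it is missing
 * there.  Returns the reference pmd on success, or NULL if the kernel
 * tables have no mapping at this address.
 */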
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;
	if (!pmd_present(*pmd)) {
		set_pmd(pmd, *pmd_k);
		arch_flush_lazy_mmu_mode();
	} else
		BUG_ON(pmd_ptfn(*pmd) != pmd_ptfn(*pmd_k));
	return pmd_k;
}

/*
 * Handle a fault on the vmalloc area.  Returns 0 if the fault was
 * resolved here (or the address is a huge-page mapping), or -1 if the
 * kernel page tables have no valid mapping for the address.
 */
static inline int vmalloc_fault(pgd_t *pgd, unsigned long address)
{
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pmd_k = vmalloc_sync_one(pgd, address);
	if (!pmd_k)
		return -1;
	if (pmd_huge(*pmd_k))
		return 0;	/* support TILE huge_vmap() API */
	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;
	return 0;
}

/* Wait until this PTE has completed migration. */
static void wait_for_migration(pte_t *pte)
{
	if (pte_migrating(*pte)) {
		/*
		 * Wait until the migrator fixes up this pte.
		 * We scale the loop count by the clock rate so we'll wait for
		 * a few seconds here.
		 */
		int retries = 0;
		int bound = get_clock_rate();
		while (pte_migrating(*pte)) {
			barrier();
			if (++retries > bound)
				panic("Hit migrating PTE (%#llx) and"
				      " page PFN %#lx still migrating",
				      pte->val, pte_pfn(*pte));
		}
	}
}

/*
 * It's not generally safe to use "current" to get the page table pointer,
 * since we might be running an oprofile interrupt in the middle of a
 * task switch.
 */
static pgd_t *get_current_pgd(void)
{
	HV_Context ctx = hv_inquire_context();
	unsigned long pgd_pfn = ctx.page_table >> PAGE_SHIFT;
	struct page *pgd_page = pfn_to_page(pgd_pfn);
	BUG_ON(PageHighMem(pgd_page));	/* oops, HIGHPTE? */
	return (pgd_t *) __va(ctx.page_table);
}

/*
 * We can receive a page fault from a migrating PTE at any time.
 * Handle it by just waiting until the fault resolves.
 *
 * It's also possible to get a migrating kernel PTE that resolves
 * itself during the downcall from hypervisor to Linux.  We just check
 * here to see if the PTE seems valid, and if so we retry it.
 *
 * NOTE! We MUST NOT take any locks for this case.  We may be in an
 * interrupt or a critical region, and must do as little as possible.
 * Similarly, we can't use atomic ops here, since we may be handling a
 * fault caused by an atomic op access.
 *
 * If we find a migrating PTE while we're in an NMI context, and we're
 * at a PC that has a registered exception handler, we don't wait,
 * since this thread may (e.g.) have been interrupted while migrating
 * its own stack, which would then cause us to self-deadlock.
 */
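/*
 * Returns 1 if the faulting access can simply be retried (either we
 * waited out a migration, or a kernel PTE already permits the access);
 * returns 0 to continue with normal fault handling.
 */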
static int handle_migrating_pte(pgd_t *pgd, int fault_num,
				unsigned long address, unsigned long pc,
				int is_kernel_mode, int write)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;

	if (pgd_addr_invalid(address))
		return 0;

	pgd += pgd_index(address);
	pud = pud_offset(pgd, address);
	if (!pud || !pud_present(*pud))
		return 0;
	pmd = pmd_offset(pud, address);
	if (!pmd || !pmd_present(*pmd))
		return 0;
	pte = pmd_huge_page(*pmd) ? ((pte_t *)pmd) :
		pte_offset_kernel(pmd, address);
	pteval = *pte;
	if (pte_migrating(pteval)) {
		if (in_nmi() && search_exception_tables(pc))
			return 0;
		wait_for_migration(pte);
		return 1;
	}

	if (!is_kernel_mode || !pte_present(pteval))
		return 0;
	if (fault_num == INT_ITLB_MISS) {
		if (pte_exec(pteval))
			return 1;
	} else if (write) {
		if (pte_write(pteval))
			return 1;
	} else {
		if (pte_read(pteval))
			return 1;
	}

	return 0;
}

/*
 * This routine is responsible for faulting in user pages.
 * It passes the work off to one of the appropriate routines.
 * It returns true if the fault was successfully handled.
 */
static int handle_page_fault(struct pt_regs *regs,
			     int fault_num,
			     int is_page_fault,
			     unsigned long address,
			     int write)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long stack_offset;
	int fault;
	int si_code;
	int is_kernel_mode;
	pgd_t *pgd;

	/* on TILE, protection faults are always writes */
	if (!is_page_fault)
		write = 1;

	is_kernel_mode = (EX1_PL(regs->ex1) != USER_PL);

	tsk = validate_current();

	/*
	 * Check to see if we might be overwriting the stack, and bail
	 * out if so.  The page fault code is a relatively likely
	 * place to get trapped in an infinite regress, and once we
	 * overwrite the whole stack, it becomes very hard to recover.
	 */
	stack_offset = stack_pointer & (THREAD_SIZE-1);
	if (stack_offset < THREAD_SIZE / 8) {
		pr_alert("Potential stack overrun: sp %#lx\n",
			 stack_pointer);
		show_regs(regs);
		pr_alert("Killing current process %d/%s\n",
			 tsk->pid, tsk->comm);
		do_group_exit(SIGKILL);
	}

	/*
	 * Early on, we need to check for migrating PTE entries;
	 * see homecache.c.  If we find a migrating PTE, we wait until
	 * the backing page claims to be done migrating, then we proceed.
	 * For kernel PTEs, we rewrite the PTE and return and retry.
	 * Otherwise, we treat the fault like a normal "no PTE" fault,
	 * rather than trying to patch up the existing PTE.
	 */
	pgd = get_current_pgd();
	if (handle_migrating_pte(pgd, fault_num, address, regs->pc,
				 is_kernel_mode, write))
		return 1;

	si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand.  The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case.  We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was not a protection fault.
	 */
	if (unlikely(address >= TASK_SIZE &&
		     !is_arch_mappable_range(address, 0))) {
		if (is_kernel_mode && is_page_fault &&
		    vmalloc_fault(pgd, address) >= 0)
			return 1;
		/*
		 * Don't take the mm semaphore here.  If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		mm = NULL;	/* happy compiler */
		vma = NULL;
		goto bad_area_nosemaphore;
	}

	/*
	 * If we're trying to touch user-space addresses, we must
	 * be either at PL0, or else with interrupts enabled in the
	 * kernel, so either way we can re-enable interrupts here.
	 */
	local_irq_enable();

	mm = tsk->mm;

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault.
	 */
	if (in_atomic() || !mm) {
		vma = NULL;	/* happy compiler */
		goto bad_area_nosemaphore;
	}

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (is_kernel_mode &&
		    !search_exception_tables(regs->pc)) {
			vma = NULL;	/* happy compiler */
			goto bad_area_nosemaphore;
		}
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (regs->sp < PAGE_OFFSET) {
		/*
		 * accessing the stack below sp is always a bug.
		 */
		if (address < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	if (fault_num == INT_ITLB_MISS) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (write) {
#ifdef TEST_VERIFY_AREA
		if (!is_page_fault && regs->cs == KERNEL_CS)
			pr_err("WP fault at "REGFMT"\n", regs->eip);
#endif
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!is_page_fault || !(vma->vm_flags & VM_READ))
			goto bad_area;
	}

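/*
 * The "survive" label is the retry point for the out-of-memory path
 * below, which lets init yield and retry instead of being killed.
 */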
survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
	/*
	 * If this was an asynchronous fault,
	 * restart the appropriate engine.
	 */
	switch (fault_num) {
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_MISS:
	case INT_DMATLB_MISS_DWNCL:
	case INT_DMATLB_ACCESS:
	case INT_DMATLB_ACCESS_DWNCL:
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
		break;
#endif
#if CHIP_HAS_SN_PROC()
	case INT_SNITLB_MISS:
	case INT_SNITLB_MISS_DWNCL:
		__insn_mtspr(SPR_SNCTL,
			     __insn_mfspr(SPR_SNCTL) &
			     ~SPR_SNCTL__FRZPROC_MASK);
		break;
#endif
	}
#endif

	up_read(&mm->mmap_sem);
	return 1;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (!is_kernel_mode) {
		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		force_sig_info_fault("segfault", SIGSEGV, si_code, address,
				     fault_num, tsk, regs);
		return 0;
	}

no_context:
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return 0;

	/*
	 * Oops.  The kernel tried to access some bad page.  We'll have to
	 * terminate things with extreme prejudice.
	 */

	bust_spinlocks(1);

	/* FIXME: no lookup_address() yet */
#ifdef SUPPORT_LOOKUP_ADDRESS
	if (fault_num == INT_ITLB_MISS) {
		pte_t *pte = lookup_address(address);

		if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
			pr_crit("kernel tried to execute"
				" non-executable page - exploit attempt?"
				" (uid: %d)\n", current->uid);
	}
#endif
	if (address < PAGE_SIZE)
		pr_alert("Unable to handle kernel NULL pointer dereference\n");
	else
		pr_alert("Unable to handle kernel paging request\n");
	pr_alert(" at virtual address "REGFMT", pc "REGFMT"\n",
		 address, regs->pc);

	show_regs(regs);

	if (unlikely(tsk->pid < 2)) {
		panic("Kernel page fault running %s!",
		      is_idle_task(tsk) ? "the idle task" : "init");
	}

	/*
	 * More FIXME: we should probably copy the i386 here and
	 * implement a generic die() routine.  Not today.
	 */
#ifdef SUPPORT_DIE
	die("Oops", regs);
#endif
	bust_spinlocks(1);

	do_group_exit(SIGKILL);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	pr_alert("VM: killing process %s\n", tsk->comm);
	if (!is_kernel_mode)
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (is_kernel_mode)
		goto no_context;

	force_sig_info_fault("bus error", SIGBUS, BUS_ADRERR, address,
			     fault_num, tsk, regs);
	return 0;
}

#ifndef __tilegx__

/* We must release ICS before panicking or we won't get anywhere. */
#define ics_panic(fmt, ...) do { \
	__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \
	panic(fmt, __VA_ARGS__); \
} while (0)

/*
 * When we take an ITLB or DTLB fault or access violation in the
 * supervisor while the critical section bit is set, the hypervisor is
 * reluctant to write new values into the EX_CONTEXT_K_x registers,
 * since that might indicate we have not yet squirreled the SPR
 * contents away and can thus safely take a recursive interrupt.
 * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_K_2.
 *
 * Note that this routine is called before homecache_tlb_defer_enter(),
 * which means that we can properly unlock any atomics that might
 * be used there (good), but also means we must be very sensitive
 * to not touch any data structures that might be located in memory
 * that could migrate, as we could be entering the kernel on a dataplane
 * cpu that has been deferring kernel TLB updates.  This means, for
 * example, that we can't migrate init_mm or its pgd.
 */
struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
				      unsigned long address,
				      unsigned long info)
{
	unsigned long pc = info & ~1;
	int write = info & 1;
	pgd_t *pgd = get_current_pgd();

	/* Retval is 1 at first since we will handle the fault fully. */
	struct intvec_state state = {
		do_page_fault, fault_num, address, write, 1
	};

	/* Validate that we are plausibly in the right routine. */
	if ((pc & 0x7) != 0 || pc < PAGE_OFFSET ||
	    (fault_num != INT_DTLB_MISS &&
	     fault_num != INT_DTLB_ACCESS)) {
		unsigned long old_pc = regs->pc;
		regs->pc = pc;
		ics_panic("Bad ICS page fault args:"
			  " old PC %#lx, fault %d/%d at %#lx\n",
			  old_pc, fault_num, write, address);
	}

	/* We might be faulting on a vmalloc page, so check that first. */
	if (fault_num != INT_DTLB_ACCESS && vmalloc_fault(pgd, address) >= 0)
		return state;

	/*
	 * If we faulted with ICS set in sys_cmpxchg, we are providing
	 * a user syscall service that should generate a signal on
	 * fault.  We didn't set up a kernel stack on initial entry to
	 * sys_cmpxchg, but instead had one set up by the fault, which
	 * (because sys_cmpxchg never releases ICS) came to us via the
	 * SYSTEM_SAVE_K_2 mechanism, and thus EX_CONTEXT_K_[01] are
	 * still referencing the original user code.  We release the
	 * atomic lock and rewrite pt_regs so that it appears that we
	 * came from user-space directly, and after we finish the
	 * fault we'll go back to user space and re-issue the swint.
	 * This way the backtrace information is correct if we need to
	 * emit a stack dump at any point while handling this.
	 *
	 * Must match register use in sys_cmpxchg().
	 */
	if (pc >= (unsigned long) sys_cmpxchg &&
	    pc < (unsigned long) __sys_cmpxchg_end) {
#ifdef CONFIG_SMP
		/* Don't unlock before we could have locked. */
		if (pc >= (unsigned long)__sys_cmpxchg_grab_lock) {
			int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
			__atomic_fault_unlock(lock_ptr);
		}
#endif
		regs->sp = regs->regs[27];
	}

	/*
	 * We can also fault in the atomic assembly, in which
	 * case we use the exception table to do the first-level fixup.
	 * We may re-fixup again in the real fault handler if it
	 * turns out the faulting address is just bad, and not,
	 * for example, migrating.
	 */
	else if (pc >= (unsigned long) __start_atomic_asm_code &&
		 pc < (unsigned long) __end_atomic_asm_code) {
		const struct exception_table_entry *fixup;
#ifdef CONFIG_SMP
		/* Unlock the atomic lock. */
		int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
		__atomic_fault_unlock(lock_ptr);
#endif
		fixup = search_exception_tables(pc);
		if (!fixup)
			ics_panic("ICS atomic fault not in table:"
				  " PC %#lx, fault %d", pc, fault_num);
		regs->pc = fixup->fixup;
		regs->ex1 = PL_ICS_EX1(KERNEL_PL, 0);
	}

	/*
	 * Now that we have released the atomic lock (if necessary),
	 * it's safe to spin if the PTE that caused the fault was migrating.
	 */
	if (fault_num == INT_DTLB_ACCESS)
		write = 1;
	if (handle_migrating_pte(pgd, fault_num, address, pc, 1, write))
		return state;

	/* Return zero so that we continue on with normal fault handling. */
	state.retval = 0;
	return state;
}

#endif /* !__tilegx__ */

/*
 * This routine handles page faults.  It determines the address, and the
 * problem, and then passes it to handle_page_fault() for normal DTLB and
 * ITLB issues, and for DMA or SN processor faults when we are in user
 * space.  For the latter, if we're in kernel mode, we just save the
 * interrupt away appropriately and return immediately.  We can't do
 * page faults for user code while in kernel mode.
 */
void do_page_fault(struct pt_regs *regs, int fault_num,
		   unsigned long address, unsigned long write)
{
	int is_page_fault;

	/* This case should have been handled by do_page_fault_ics(). */
	BUG_ON(write & ~1);

#if CHIP_HAS_TILE_DMA()
	/*
	 * If it's a DMA fault, suspend the transfer while we're
	 * handling the miss; we'll restart after it's handled.  If we
	 * don't suspend, it's possible that this process could swap
	 * out and back in, and restart the engine since the DMA is
	 * still 'running'.
	 */
	if (fault_num == INT_DMATLB_MISS ||
	    fault_num == INT_DMATLB_ACCESS ||
	    fault_num == INT_DMATLB_MISS_DWNCL ||
	    fault_num == INT_DMATLB_ACCESS_DWNCL) {
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);
		while (__insn_mfspr(SPR_DMA_USER_STATUS) &
		       SPR_DMA_STATUS__BUSY_MASK)
			;
	}
#endif

	/* Validate fault num and decide if this is a first-time page fault. */
	switch (fault_num) {
	case INT_ITLB_MISS:
	case INT_DTLB_MISS:
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_MISS:
	case INT_DMATLB_MISS_DWNCL:
#endif
#if CHIP_HAS_SN_PROC()
	case INT_SNITLB_MISS:
	case INT_SNITLB_MISS_DWNCL:
#endif
		is_page_fault = 1;
		break;

	case INT_DTLB_ACCESS:
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_ACCESS:
	case INT_DMATLB_ACCESS_DWNCL:
#endif
		is_page_fault = 0;
		break;

	default:
		panic("Bad fault number %d in do_page_fault", fault_num);
	}

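	/*
	 * In kernel mode we can't fault in user pages for a DMA or SN
	 * fault, so stash the fault in the thread's async_tlb state;
	 * do_async_page_fault() replays it on return to user space.
	 */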
#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
	if (EX1_PL(regs->ex1) != USER_PL) {
		struct async_tlb *async;
		switch (fault_num) {
#if CHIP_HAS_TILE_DMA()
		case INT_DMATLB_MISS:
		case INT_DMATLB_ACCESS:
		case INT_DMATLB_MISS_DWNCL:
		case INT_DMATLB_ACCESS_DWNCL:
			async = &current->thread.dma_async_tlb;
			break;
#endif
#if CHIP_HAS_SN_PROC()
		case INT_SNITLB_MISS:
		case INT_SNITLB_MISS_DWNCL:
			async = &current->thread.sn_async_tlb;
			break;
#endif
		default:
			async = NULL;
		}
		if (async) {

			/*
			 * No vmalloc check required, so we can allow
			 * interrupts immediately at this point.
			 */
			local_irq_enable();

			set_thread_flag(TIF_ASYNC_TLB);
			if (async->fault_num != 0) {
				panic("Second async fault %d;"
				      " old fault was %d (%#lx/%ld)",
				      fault_num, async->fault_num,
				      address, write);
			}
			BUG_ON(fault_num == 0);
			async->fault_num = fault_num;
			async->is_fault = is_page_fault;
			async->is_write = write;
			async->address = address;
			return;
		}
	}
#endif

	handle_page_fault(regs, fault_num, is_page_fault, address, write);
}


#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
/*
 * Check an async_tlb structure to see if a deferred fault is waiting,
 * and if so pass it to the page-fault code.
 */
static void handle_async_page_fault(struct pt_regs *regs,
				    struct async_tlb *async)
{
	if (async->fault_num) {
		/*
		 * Clear async->fault_num before calling the page-fault
		 * handler so that if we re-interrupt before returning
		 * from the function we have somewhere to put the
		 * information from the new interrupt.
		 */
		int fault_num = async->fault_num;
		async->fault_num = 0;
		handle_page_fault(regs, fault_num, async->is_fault,
				  async->address, async->is_write);
	}
}

/*
 * This routine effectively re-issues asynchronous page faults
 * when we are returning to user space.
 */
void do_async_page_fault(struct pt_regs *regs)
{
	/*
	 * Clear thread flag early.  If we re-interrupt while processing
	 * code here, we will reset it and recall this routine before
	 * returning to user space.
	 */
	clear_thread_flag(TIF_ASYNC_TLB);

#if CHIP_HAS_TILE_DMA()
	handle_async_page_fault(regs, &current->thread.dma_async_tlb);
#endif
#if CHIP_HAS_SN_PROC()
	handle_async_page_fault(regs, &current->thread.sn_async_tlb);
#endif
}
#endif /* CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() */


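/*
 * Make sure every process's page tables include any kernel mappings
 * added to init_mm since those tables were created.  (On tilegx the
 * kernel pmds are shared, so this just sanity-checks that assumption.)
 */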
void vmalloc_sync_all(void)
{
#ifdef __tilegx__
	/* Currently all L1 kernel pmd's are static and shared. */
	BUG_ON(pgd_index(VMALLOC_END) != pgd_index(VMALLOC_START));
#else
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * if undone).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = PAGE_OFFSET;
	unsigned long address;

	BUILD_BUG_ON(PAGE_OFFSET & ~PGDIR_MASK);
	for (address = start; address >= PAGE_OFFSET; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			unsigned long flags;
			struct list_head *pos;

			spin_lock_irqsave(&pgd_lock, flags);
			list_for_each(pos, &pgd_list)
				if (!vmalloc_sync_one(list_to_pgd(pos),
						      address)) {
					/* Must be at first entry in list. */
					BUG_ON(pos != pgd_list.next);
					break;
				}
			spin_unlock_irqrestore(&pgd_lock, flags);
			if (pos != pgd_list.next)
				set_bit(pgd_index(address), insync);
		}
		if (address == start && test_bit(pgd_index(address), insync))
			start = address + PGDIR_SIZE;
	}
#endif
}