Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/arch/x86-64/mm/fault.c | |
3 | * | |
4 | * Copyright (C) 1995 Linus Torvalds | |
5 | * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs. | |
6 | */ | |
7 | ||
8 | #include <linux/config.h> | |
9 | #include <linux/signal.h> | |
10 | #include <linux/sched.h> | |
11 | #include <linux/kernel.h> | |
12 | #include <linux/errno.h> | |
13 | #include <linux/string.h> | |
14 | #include <linux/types.h> | |
15 | #include <linux/ptrace.h> | |
16 | #include <linux/mman.h> | |
17 | #include <linux/mm.h> | |
18 | #include <linux/smp.h> | |
19 | #include <linux/smp_lock.h> | |
20 | #include <linux/interrupt.h> | |
21 | #include <linux/init.h> | |
22 | #include <linux/tty.h> | |
23 | #include <linux/vt_kern.h> /* For unblank_screen() */ | |
24 | #include <linux/compiler.h> | |
25 | #include <linux/module.h> | |
1da177e4 LT |
26 | |
27 | #include <asm/system.h> | |
28 | #include <asm/uaccess.h> | |
29 | #include <asm/pgalloc.h> | |
30 | #include <asm/smp.h> | |
31 | #include <asm/tlbflush.h> | |
32 | #include <asm/proto.h> | |
33 | #include <asm/kdebug.h> | |
34 | #include <asm-generic/sections.h> | |
36 | ||
37 | void bust_spinlocks(int yes) | |
38 | { | |
39 | int loglevel_save = console_loglevel; | |
40 | if (yes) { | |
41 | oops_in_progress = 1; | |
42 | } else { | |
43 | #ifdef CONFIG_VT | |
44 | unblank_screen(); | |
45 | #endif | |
46 | oops_in_progress = 0; | |
47 | /* | |
48 | * OK, the message is on the console. Now we call printk() | |
49 | * without oops_in_progress set so that printk will give klogd | |
50 | * a poke. Hold onto your hats... | |
51 | */ | |
52 | console_loglevel = 15; /* NMI oopser may have shut the console up */ | |
53 | printk(" "); | |
54 | console_loglevel = loglevel_save; | |
55 | } | |
56 | } | |
57 | ||
58 | /* Sometimes the CPU reports invalid exceptions on prefetch | |
59 | instructions. Check for that here and ignore the fault. | |
60 | Opcode checker based on code by Richard Brunner. */ | |
61 | static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr, | |
62 | unsigned long error_code) | |
63 | { | |
f1290ec9 | 64 | unsigned char *instr; |
1da177e4 LT |
65 | int scan_more = 1; |
66 | int prefetch = 0; | |
f1290ec9 | 67 | unsigned char *max_instr; |
1da177e4 LT |
68 | |
69 | /* If it was an instruction fetch fault, ignore it */ | |
70 | if (error_code & (1<<4)) | |
71 | return 0; | |
72 | ||
f1290ec9 AK |
73 | instr = (unsigned char *)convert_rip_to_linear(current, regs); |
74 | max_instr = instr + 15; | |
1da177e4 | 75 | |
76381fee | 76 | if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE) |
1da177e4 LT |
77 | return 0; |
78 | ||
79 | while (scan_more && instr < max_instr) { | |
80 | unsigned char opcode; | |
81 | unsigned char instr_hi; | |
82 | unsigned char instr_lo; | |
83 | ||
84 | if (__get_user(opcode, instr)) | |
85 | break; | |
86 | ||
87 | instr_hi = opcode & 0xf0; | |
88 | instr_lo = opcode & 0x0f; | |
89 | instr++; | |
90 | ||
91 | switch (instr_hi) { | |
92 | case 0x20: | |
93 | case 0x30: | |
94 | /* Values 0x26,0x2E,0x36,0x3E are valid x86 | |
95 | prefixes. In long mode, the CPU will signal an | |
96 | invalid opcode if some of these prefixes are | |
97 | present, so we will never get here anyway. */ | |
98 | scan_more = ((instr_lo & 7) == 0x6); | |
99 | break; | |
100 | ||
101 | case 0x40: | |
102 | /* In AMD64 long mode, 0x40 to 0x4F are valid REX prefixes. | |
103 | We would need to know which mode the | |
104 | instruction was issued in ... */ | |
105 | /* We could check the LDT for the lm bit, but for now it's good | |
106 | enough to assume that long mode only uses well-known | |
107 | segments or the kernel. */ | |
76381fee | 108 | scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS); |
1da177e4 LT |
109 | break; |
110 | ||
111 | case 0x60: | |
112 | /* 0x64 through 0x67 are valid prefixes in all modes. */ | |
113 | scan_more = (instr_lo & 0xC) == 0x4; | |
114 | break; | |
115 | case 0xF0: | |
116 | /* 0xF0, 0xF2, and 0xF3 are valid prefixes in all modes. */ | |
117 | scan_more = !instr_lo || (instr_lo>>1) == 1; | |
118 | break; | |
119 | case 0x00: | |
120 | /* Prefetch instruction is 0x0F0D or 0x0F18 */ | |
121 | scan_more = 0; | |
122 | if (__get_user(opcode, instr)) | |
123 | break; | |
124 | prefetch = (instr_lo == 0xF) && | |
125 | (opcode == 0x0D || opcode == 0x18); | |
126 | break; | |
127 | default: | |
128 | scan_more = 0; | |
129 | break; | |
130 | } | |
131 | } | |
132 | return prefetch; | |
133 | } | |
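The scanner above walks at most 15 bytes (the architectural instruction-length limit), skipping legal prefix bytes until it either finds the 0x0F 0x0D / 0x0F 0x18 prefetch opcodes or hits a byte that cannot precede them. Below is a minimal user-space sketch of the same scan over a plain byte buffer, with `__get_user()` replaced by direct reads and long mode simply assumed for the REX case; `looks_like_prefetch` and the test instruction are hypothetical, not part of the kernel.

```c
/* Hypothetical user-space re-creation of the prefix scan in is_prefetch(). */
#include <stdio.h>

static int looks_like_prefetch(const unsigned char *instr, int len)
{
	/* x86 instructions are at most 15 bytes long */
	const unsigned char *max_instr = instr + (len < 15 ? len : 15);
	int prefetch = 0;

	while (instr < max_instr) {
		unsigned char opcode = *instr++;
		unsigned char hi = opcode & 0xf0, lo = opcode & 0x0f;

		if (hi == 0x20 || hi == 0x30) {
			/* segment overrides 0x26, 0x2E, 0x36, 0x3E */
			if ((lo & 7) != 0x6)
				break;
		} else if (hi == 0x40) {
			/* REX prefixes; valid because we assume long mode */
			continue;
		} else if (hi == 0x60) {
			/* 0x64-0x67: FS/GS overrides, operand/address size */
			if ((lo & 0xC) != 0x4)
				break;
		} else if (hi == 0xF0) {
			/* LOCK (0xF0), REPNE (0xF2), REP (0xF3) */
			if (lo != 0 && (lo >> 1) != 1)
				break;
		} else if (hi == 0x00) {
			/* 0x0F escape: prefetch is 0x0F 0x0D or 0x0F 0x18 */
			if (lo == 0xF && instr < max_instr)
				prefetch = (*instr == 0x0D || *instr == 0x18);
			break;
		} else {
			break;	/* anything else cannot precede a prefetch */
		}
	}
	return prefetch;
}

int main(void)
{
	/* gs-prefixed prefetchnta (%rax): 0x65 0x0F 0x18 0x00 */
	unsigned char insn[] = { 0x65, 0x0F, 0x18, 0x00 };

	printf("prefetch: %d\n", looks_like_prefetch(insn, sizeof insn));
	return 0;
}
```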
134 | ||
135 | static int bad_address(void *p) | |
136 | { | |
137 | unsigned long dummy; | |
138 | return __get_user(dummy, (unsigned long *)p); | |
139 | } | |
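bad_address() leans on `__get_user()`, whose exception-table fixup turns a faulting access into an error return instead of an oops. User space has no exception tables, but the same probe-without-crashing idea can be sketched by letting the kernel perform the access via write(2), which fails with EFAULT for an unmapped pointer; `bad_address_user` is a hypothetical helper, not kernel API.

```c
/* Hypothetical user-space analogue of bad_address(): probe a pointer
 * without crashing by letting write(2) perform the access; the kernel
 * returns EFAULT instead of delivering SIGSEGV to us. */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int bad_address_user(const void *p)
{
	int fds[2], bad;

	if (pipe(fds) < 0)
		return 1;	/* treat setup failure as "bad" */
	bad = (write(fds[1], p, 1) < 0 && errno == EFAULT);
	close(fds[0]);
	close(fds[1]);
	return bad;
}

int main(void)
{
	int on_stack = 0;

	printf("NULL  -> %d\n", bad_address_user(NULL));	/* 1: unmapped */
	printf("stack -> %d\n", bad_address_user(&on_stack));	/* 0: readable */
	return 0;
}
```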
140 | ||
141 | void dump_pagetable(unsigned long address) | |
142 | { | |
143 | pgd_t *pgd; | |
144 | pud_t *pud; | |
145 | pmd_t *pmd; | |
146 | pte_t *pte; | |
147 | ||
148 | asm("movq %%cr3,%0" : "=r" (pgd)); | |
149 | ||
150 | pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK); | |
151 | pgd += pgd_index(address); | |
152 | printk("PGD %lx ", pgd_val(*pgd)); | |
153 | if (bad_address(pgd)) goto bad; | |
154 | if (!pgd_present(*pgd)) goto ret; | |
155 | ||
156 | pud = __pud_offset_k((pud_t *)pgd_page(*pgd), address); | |
157 | if (bad_address(pud)) goto bad; | |
158 | printk("PUD %lx ", pud_val(*pud)); | |
159 | if (!pud_present(*pud)) goto ret; | |
160 | ||
161 | pmd = pmd_offset(pud, address); | |
162 | if (bad_address(pmd)) goto bad; | |
163 | printk("PMD %lx ", pmd_val(*pmd)); | |
164 | if (!pmd_present(*pmd)) goto ret; | |
165 | ||
166 | pte = pte_offset_kernel(pmd, address); | |
167 | if (bad_address(pte)) goto bad; | |
168 | printk("PTE %lx", pte_val(*pte)); | |
169 | ret: | |
170 | printk("\n"); | |
171 | return; | |
172 | bad: | |
173 | printk("BAD\n"); | |
174 | } | |
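dump_pagetable() walks the four levels by letting pgd_index() and the pud/pmd/pte offset helpers slice the virtual address. The slicing itself is purely architectural: 9 bits of index per level plus a 12-bit page offset. A standalone sketch of that arithmetic follows; the shift values are the standard x86-64 constants, and the program is illustrative rather than taken from fault.c.

```c
/* Illustrative: the 9-9-9-9-12 split of an x86-64 virtual address that
 * pgd_index() and friends implement. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21	/* PAGE_SHIFT + 9 */
#define PUD_SHIFT	30	/* PMD_SHIFT + 9 */
#define PGDIR_SHIFT	39	/* PUD_SHIFT + 9 */
#define TABLE_MASK	511	/* 512 entries per table */

int main(void)
{
	unsigned long address = 0xffffffff80100000UL;	/* a typical kernel-text address */

	printf("pgd index: %3lu\n", (address >> PGDIR_SHIFT) & TABLE_MASK);
	printf("pud index: %3lu\n", (address >> PUD_SHIFT) & TABLE_MASK);
	printf("pmd index: %3lu\n", (address >> PMD_SHIFT) & TABLE_MASK);
	printf("pte index: %3lu\n", (address >> PAGE_SHIFT) & TABLE_MASK);
	return 0;
}
```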
175 | ||
176 | static const char errata93_warning[] = | |
177 | KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n" | |
178 | KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n" | |
179 | KERN_ERR "******* Please consider a BIOS update.\n" | |
180 | KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n"; | |
181 | ||
182 | /* Workaround for K8 erratum #93 and buggy BIOSes. | |
183 | BIOS SMM functions are required to use a specific workaround | |
184 | to avoid corruption of the 64-bit RIP register on C-stepping K8. | |
185 | A lot of BIOSes that weren't tested properly miss this. | |
186 | The OS sees this as a page fault with the upper 32 bits of RIP cleared. | |
187 | Try to work around it here. | |
188 | Note that we only handle kernel faults here. */ | |
189 | ||
190 | static int is_errata93(struct pt_regs *regs, unsigned long address) | |
191 | { | |
192 | static int warned; | |
193 | if (address != regs->rip) | |
194 | return 0; | |
195 | if ((address >> 32) != 0) | |
196 | return 0; | |
197 | address |= 0xffffffffUL << 32; | |
198 | if ((address >= (u64)_stext && address <= (u64)_etext) || | |
199 | (address >= MODULES_VADDR && address <= MODULES_END)) { | |
200 | if (!warned) { | |
201 | printk(errata93_warning); | |
202 | warned = 1; | |
203 | } | |
204 | regs->rip = address; | |
205 | return 1; | |
206 | } | |
207 | return 0; | |
208 | } | |
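The actual repair in is_errata93() is a one-liner: the erratum clears the upper 32 bits of RIP, and since the affected kernel text lives at canonical addresses with those bits set, OR-ing in 0xffffffff00000000 reconstructs the intended address before it is re-checked against _stext/_etext and the module range. A tiny, self-contained sketch of just that transformation (the sample address is made up):

```c
/* Illustrative only: the address repair at the heart of is_errata93(). */
#include <stdio.h>

int main(void)
{
	unsigned long intended = 0xffffffff80123456UL;	/* where the kernel really was */
	unsigned long reported = intended & 0xffffffffUL;	/* what the buggy SMM path leaves */
	unsigned long repaired = reported | (0xffffffffUL << 32);

	printf("reported %016lx -> repaired %016lx (match: %d)\n",
	       reported, repaired, repaired == intended);
	return 0;
}
```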
209 | ||
210 | int unhandled_signal(struct task_struct *tsk, int sig) | |
211 | { | |
212 | if (tsk->pid == 1) | |
213 | return 1; | |
5e5ec104 | 214 | if (tsk->ptrace & PT_PTRACED) |
1da177e4 LT |
215 | return 0; |
216 | return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) || | |
217 | (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL); | |
218 | } | |
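unhandled_signal() reports whether a fatal signal would actually reach a handler (exempting init and ptraced tasks, where the answer differs). A process can ask the equivalent question about itself from user space with sigaction(2), passing a NULL new action to query the current disposition without changing it; `signal_unhandled` below is a hypothetical helper built on that standard API.

```c
/* Sketch: the user-space view of the question unhandled_signal() answers. */
#include <signal.h>
#include <stdio.h>

static void handler(int sig) { (void)sig; }

static int signal_unhandled(int sig)
{
	struct sigaction act;

	if (sigaction(sig, NULL, &act) < 0)
		return 1;
	return act.sa_handler == SIG_IGN || act.sa_handler == SIG_DFL;
}

int main(void)
{
	printf("before: SIGSEGV unhandled = %d\n", signal_unhandled(SIGSEGV));
	signal(SIGSEGV, handler);	/* install a real handler */
	printf("after:  SIGSEGV unhandled = %d\n", signal_unhandled(SIGSEGV));
	return 0;
}
```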
219 | ||
220 | static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs, | |
221 | unsigned long error_code) | |
222 | { | |
223 | oops_begin(); | |
224 | printk(KERN_ALERT "%s: Corrupted page table at address %lx\n", | |
225 | current->comm, address); | |
226 | dump_pagetable(address); | |
227 | __die("Bad pagetable", regs, error_code); | |
228 | oops_end(); | |
229 | do_exit(SIGKILL); | |
230 | } | |
231 | ||
232 | /* | |
233 | * Handle a fault on the vmalloc or module mapping area | |
3b9ba4d5 AK |
234 | * |
235 | * This assumes no large pages in there. | |
1da177e4 LT |
236 | */ |
237 | static int vmalloc_fault(unsigned long address) | |
238 | { | |
239 | pgd_t *pgd, *pgd_ref; | |
240 | pud_t *pud, *pud_ref; | |
241 | pmd_t *pmd, *pmd_ref; | |
242 | pte_t *pte, *pte_ref; | |
243 | ||
244 | /* Copy kernel mappings over when needed. This can also | |
245 | happen as the result of a race in a page table update; in | |
246 | the latter case, just flush. */ | |
247 | ||
248 | pgd = pgd_offset(current->mm ?: &init_mm, address); | |
249 | pgd_ref = pgd_offset_k(address); | |
250 | if (pgd_none(*pgd_ref)) | |
251 | return -1; | |
252 | if (pgd_none(*pgd)) | |
253 | set_pgd(pgd, *pgd_ref); | |
254 | ||
255 | /* Below here mismatches are bugs because these lower tables | |
256 | are shared */ | |
257 | ||
258 | pud = pud_offset(pgd, address); | |
259 | pud_ref = pud_offset(pgd_ref, address); | |
260 | if (pud_none(*pud_ref)) | |
261 | return -1; | |
262 | if (pud_none(*pud) || pud_page(*pud) != pud_page(*pud_ref)) | |
263 | BUG(); | |
264 | pmd = pmd_offset(pud, address); | |
265 | pmd_ref = pmd_offset(pud_ref, address); | |
266 | if (pmd_none(*pmd_ref)) | |
267 | return -1; | |
268 | if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref)) | |
269 | BUG(); | |
270 | pte_ref = pte_offset_kernel(pmd_ref, address); | |
271 | if (!pte_present(*pte_ref)) | |
272 | return -1; | |
273 | pte = pte_offset_kernel(pmd, address); | |
3b9ba4d5 AK |
274 | /* Don't use pte_page here, because the mappings can point |
275 | outside mem_map, and the NUMA hash lookup cannot handle | |
276 | that. */ | |
277 | if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref)) | |
1da177e4 LT |
278 | BUG(); |
279 | __flush_tlb_all(); | |
280 | return 0; | |
281 | } | |
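vmalloc_fault() is lazy synchronization: a process's page table may lack a top-level (pgd) entry for a vmalloc mapping that already exists in the reference table init_mm, so the fault handler copies that one entry on demand, while everything below the pgd is shared, making deeper mismatches bugs rather than missing copies. A generic sketch of the pattern over plain arrays, where every name is hypothetical:

```c
/* Generic sketch of the lazy-sync pattern in vmalloc_fault(). */
#include <assert.h>

#define TOP_ENTRIES 512

static unsigned long reference_top[TOP_ENTRIES];	/* plays init_mm's pgd */

static int lazy_sync(unsigned long *task_top, unsigned idx)
{
	if (!reference_top[idx])
		return -1;			/* genuinely unmapped: a real fault */
	if (!task_top[idx])
		task_top[idx] = reference_top[idx];	/* copy the entry on demand */
	/* Below the top level the tables are shared, so from here on
	 * any mismatch would be a bug, not a missing copy. */
	assert(task_top[idx] == reference_top[idx]);
	return 0;
}

int main(void)
{
	static unsigned long task_top[TOP_ENTRIES];

	reference_top[1] = 0xdeadbeefUL;	/* pretend a kernel mapping exists */
	assert(lazy_sync(task_top, 1) == 0);	/* first touch copies it over */
	assert(lazy_sync(task_top, 2) == -1);	/* nothing to copy: bad address */
	return 0;
}
```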
282 | ||
283 | int page_fault_trace = 0; | |
284 | int exception_trace = 1; | |
285 | ||
286 | /* | |
287 | * This routine handles page faults. It determines the address, | |
288 | * and the problem, and then passes it off to one of the appropriate | |
289 | * routines. | |
290 | * | |
291 | * error_code: | |
292 | * bit 0 == 0 means no page found, 1 means protection fault | |
293 | * bit 1 == 0 means read, 1 means write | |
294 | * bit 2 == 0 means kernel, 1 means user-mode | |
295 | * bit 3 == 1 means reserved bit violation; bit 4 == 1 means instruction fetch | |
296 | */ | |
297 | asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code) | |
298 | { | |
299 | struct task_struct *tsk; | |
300 | struct mm_struct *mm; | |
301 | struct vm_area_struct * vma; | |
302 | unsigned long address; | |
303 | const struct exception_table_entry *fixup; | |
304 | int write; | |
305 | siginfo_t info; | |
306 | ||
307 | #ifdef CONFIG_CHECKING | |
308 | { | |
309 | unsigned long gs; | |
310 | struct x8664_pda *pda = cpu_pda + stack_smp_processor_id(); | |
311 | rdmsrl(MSR_GS_BASE, gs); | |
312 | if (gs != (unsigned long)pda) { | |
313 | wrmsrl(MSR_GS_BASE, pda); | |
314 | printk("page_fault: wrong gs %lx expected %p\n", gs, pda); | |
315 | } | |
316 | } | |
317 | #endif | |
318 | ||
319 | /* get the address */ | |
320 | __asm__("movq %%cr2,%0":"=r" (address)); | |
321 | if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14, | |
322 | SIGSEGV) == NOTIFY_STOP) | |
323 | return; | |
324 | ||
325 | if (likely(regs->eflags & X86_EFLAGS_IF)) | |
326 | local_irq_enable(); | |
327 | ||
328 | if (unlikely(page_fault_trace)) | |
329 | printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n", | |
330 | regs->rip, regs->rsp, regs->cs, regs->ss, address, error_code); | |
331 | ||
332 | tsk = current; | |
333 | mm = tsk->mm; | |
334 | info.si_code = SEGV_MAPERR; | |
335 | ||
336 | ||
337 | /* | |
338 | * We fault-in kernel-space virtual memory on-demand. The | |
339 | * 'reference' page table is init_mm.pgd. | |
340 | * | |
341 | * NOTE! We MUST NOT take any locks for this case. We may | |
342 | * be in an interrupt or a critical region, and should | |
343 | * only copy the information from the master page table, | |
344 | * nothing more. | |
345 | * | |
346 | * This verifies that the fault happens in kernel space | |
347 | * (error_code & 4) == 0, and that the fault was not a | |
348 | * protection error (error_code & 1) == 0. | |
349 | */ | |
84929801 | 350 | if (unlikely(address >= TASK_SIZE64)) { |
3b9ba4d5 AK |
351 | if (!(error_code & 5) && |
352 | ((address >= VMALLOC_START && address < VMALLOC_END) || | |
353 | (address >= MODULES_VADDR && address < MODULES_END))) { | |
1da177e4 LT |
354 | if (vmalloc_fault(address) < 0) |
355 | goto bad_area_nosemaphore; | |
356 | return; | |
357 | } | |
358 | /* | |
359 | * Don't take the mm semaphore here. If we fixup a prefetch | |
360 | * fault we could otherwise deadlock. | |
361 | */ | |
362 | goto bad_area_nosemaphore; | |
363 | } | |
364 | ||
365 | if (unlikely(error_code & (1 << 3))) | |
366 | pgtable_bad(address, regs, error_code); | |
367 | ||
368 | /* | |
369 | * If we're in an interrupt or have no user | |
370 | * context, we must not take the fault.. | |
371 | */ | |
372 | if (unlikely(in_atomic() || !mm)) | |
373 | goto bad_area_nosemaphore; | |
374 | ||
375 | again: | |
376 | /* When running in the kernel we expect faults to occur only to | |
377 | * addresses in user space. All other faults represent errors in the | |
378 | * kernel and should generate an OOPS. Unfortunately, in the case of an | |
379 | * erroneous fault occurring in a code path which already holds mmap_sem | |
380 | * we will deadlock attempting to validate the fault against the | |
381 | * address space. Luckily the kernel only validly references user | |
382 | * space from well defined areas of code, which are listed in the | |
383 | * exceptions table. | |
384 | * | |
385 | * As the vast majority of faults will be valid we will only perform | |
386 | * the source reference check when there is a possibility of a deadlock. | |
387 | * Attempt to lock the address space, if we cannot we then validate the | |
388 | * source. If this is invalid we can skip the address space check, | |
389 | * thus avoiding the deadlock. | |
390 | */ | |
391 | if (!down_read_trylock(&mm->mmap_sem)) { | |
392 | if ((error_code & 4) == 0 && | |
393 | !search_exception_tables(regs->rip)) | |
394 | goto bad_area_nosemaphore; | |
395 | down_read(&mm->mmap_sem); | |
396 | } | |
397 | ||
398 | vma = find_vma(mm, address); | |
399 | if (!vma) | |
400 | goto bad_area; | |
401 | if (likely(vma->vm_start <= address)) | |
402 | goto good_area; | |
403 | if (!(vma->vm_flags & VM_GROWSDOWN)) | |
404 | goto bad_area; | |
405 | if (error_code & 4) { | |
406 | // Allow for the 128-byte red zone below %rsp defined by the AMD64 ABI | |
407 | if (address + 128 < regs->rsp) | |
408 | goto bad_area; | |
409 | } | |
410 | if (expand_stack(vma, address)) | |
411 | goto bad_area; | |
412 | /* | |
413 | * Ok, we have a good vm_area for this memory access, so | |
414 | * we can handle it.. | |
415 | */ | |
416 | good_area: | |
417 | info.si_code = SEGV_ACCERR; | |
418 | write = 0; | |
419 | switch (error_code & 3) { | |
420 | default: /* 3: write, present */ | |
421 | /* fall through */ | |
422 | case 2: /* write, not present */ | |
423 | if (!(vma->vm_flags & VM_WRITE)) | |
424 | goto bad_area; | |
425 | write++; | |
426 | break; | |
427 | case 1: /* read, present */ | |
428 | goto bad_area; | |
429 | case 0: /* read, not present */ | |
430 | if (!(vma->vm_flags & (VM_READ | VM_EXEC))) | |
431 | goto bad_area; | |
432 | } | |
433 | ||
434 | /* | |
435 | * If for any reason at all we couldn't handle the fault, | |
436 | * make sure we exit gracefully rather than endlessly redo | |
437 | * the fault. | |
438 | */ | |
439 | switch (handle_mm_fault(mm, vma, address, write)) { | |
96800216 | 440 | case VM_FAULT_MINOR: |
1da177e4 LT |
441 | tsk->min_flt++; |
442 | break; | |
96800216 | 443 | case VM_FAULT_MAJOR: |
1da177e4 LT |
444 | tsk->maj_flt++; |
445 | break; | |
96800216 | 446 | case VM_FAULT_SIGBUS: |
1da177e4 LT |
447 | goto do_sigbus; |
448 | default: | |
449 | goto out_of_memory; | |
450 | } | |
451 | ||
452 | up_read(&mm->mmap_sem); | |
453 | return; | |
454 | ||
455 | /* | |
456 | * Something tried to access memory that isn't in our memory map.. | |
457 | * Fix it, but check if it's kernel or user first.. | |
458 | */ | |
459 | bad_area: | |
460 | up_read(&mm->mmap_sem); | |
461 | ||
462 | bad_area_nosemaphore: | |
1da177e4 LT |
463 | /* User mode accesses just cause a SIGSEGV */ |
464 | if (error_code & 4) { | |
465 | if (is_prefetch(regs, address, error_code)) | |
466 | return; | |
467 | ||
468 | /* Work around K8 erratum #100: K8 in compat mode | |
469 | occasionally jumps to illegal addresses >4GB. We | |
470 | catch this here in the page fault handler because | |
471 | these addresses are not reachable. Just detect this | |
472 | case and return. Any code segment in the LDT is | |
473 | compatibility mode. */ | |
474 | if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && | |
475 | (address >> 32)) | |
476 | return; | |
477 | ||
478 | if (exception_trace && unhandled_signal(tsk, SIGSEGV)) { | |
479 | printk( | |
480 | "%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n", | |
481 | tsk->pid > 1 ? KERN_INFO : KERN_EMERG, | |
482 | tsk->comm, tsk->pid, address, regs->rip, | |
483 | regs->rsp, error_code); | |
484 | } | |
485 | ||
486 | tsk->thread.cr2 = address; | |
487 | /* Kernel addresses are always protection faults */ | |
488 | tsk->thread.error_code = error_code | (address >= TASK_SIZE); | |
489 | tsk->thread.trap_no = 14; | |
490 | info.si_signo = SIGSEGV; | |
491 | info.si_errno = 0; | |
492 | /* info.si_code has been set above */ | |
493 | info.si_addr = (void __user *)address; | |
494 | force_sig_info(SIGSEGV, &info, tsk); | |
495 | return; | |
496 | } | |
497 | ||
498 | no_context: | |
499 | ||
500 | /* Are we prepared to handle this kernel fault? */ | |
501 | fixup = search_exception_tables(regs->rip); | |
502 | if (fixup) { | |
503 | regs->rip = fixup->fixup; | |
504 | return; | |
505 | } | |
506 | ||
507 | /* | |
508 | * Hall of shame of CPU/BIOS bugs. | |
509 | */ | |
510 | ||
511 | if (is_prefetch(regs, address, error_code)) | |
512 | return; | |
513 | ||
514 | if (is_errata93(regs, address)) | |
515 | return; | |
516 | ||
517 | /* | |
518 | * Oops. The kernel tried to access some bad page. We'll have to | |
519 | * terminate things with extreme prejudice. | |
520 | */ | |
521 | ||
522 | oops_begin(); | |
523 | ||
524 | if (address < PAGE_SIZE) | |
525 | printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference"); | |
526 | else | |
527 | printk(KERN_ALERT "Unable to handle kernel paging request"); | |
528 | printk(" at %016lx RIP: \n" KERN_ALERT,address); | |
529 | printk_address(regs->rip); | |
530 | printk("\n"); | |
531 | dump_pagetable(address); | |
532 | __die("Oops", regs, error_code); | |
533 | /* Executive summary in case the body of the oops scrolled away */ | |
534 | printk(KERN_EMERG "CR2: %016lx\n", address); | |
535 | oops_end(); | |
536 | do_exit(SIGKILL); | |
537 | ||
538 | /* | |
539 | * We ran out of memory, or some other thing happened to us that made | |
540 | * us unable to handle the page fault gracefully. | |
541 | */ | |
542 | out_of_memory: | |
543 | up_read(&mm->mmap_sem); | |
1da177e4 LT |
544 | if (current->pid == 1) { |
545 | yield(); | |
546 | goto again; | |
547 | } | |
548 | printk("VM: killing process %s\n", tsk->comm); | |
549 | if (error_code & 4) | |
550 | do_exit(SIGKILL); | |
551 | goto no_context; | |
552 | ||
553 | do_sigbus: | |
554 | up_read(&mm->mmap_sem); | |
555 | ||
556 | /* Kernel mode? Handle exceptions or die */ | |
557 | if (!(error_code & 4)) | |
558 | goto no_context; | |
559 | ||
560 | tsk->thread.cr2 = address; | |
561 | tsk->thread.error_code = error_code; | |
562 | tsk->thread.trap_no = 14; | |
563 | info.si_signo = SIGBUS; | |
564 | info.si_errno = 0; | |
565 | info.si_code = BUS_ADRERR; | |
566 | info.si_addr = (void __user *)address; | |
567 | force_sig_info(SIGBUS, &info, tsk); | |
568 | return; | |
569 | } |
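Everything in do_page_fault() above branches on raw error_code masks: `(error_code & 5)` for the vmalloc fast path, `(1 << 3)` for pgtable_bad(), `(error_code & 4)` for the user/kernel split. A small decoder using named bits makes those tests easy to cross-check against the comment block above the function; the PF_* names here are chosen for illustration and are not defined in this file.

```c
/* Sketch: named bits for the error_code layout documented above do_page_fault(). */
#include <stdio.h>

#define PF_PROT		(1UL << 0)	/* protection fault (page was present) */
#define PF_WRITE	(1UL << 1)	/* write access */
#define PF_USER		(1UL << 2)	/* fault taken in user mode */
#define PF_RSVD		(1UL << 3)	/* reserved bit set in a page table entry */
#define PF_INSTR	(1UL << 4)	/* instruction fetch */

static void decode(unsigned long error_code)
{
	printf("%s on %s from %s mode%s%s\n",
	       error_code & PF_PROT ? "protection fault" : "missing page",
	       error_code & PF_WRITE ? "write" : "read",
	       error_code & PF_USER ? "user" : "kernel",
	       error_code & PF_RSVD ? ", reserved bit set" : "",
	       error_code & PF_INSTR ? ", instruction fetch" : "");
}

int main(void)
{
	decode(PF_USER | PF_WRITE);	/* user write to an unmapped page */
	decode(PF_PROT | PF_USER);	/* e.g. COW or permission violation */
	decode(0);			/* kernel read of an unmapped page */
	return 0;
}
```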