arch/x86/mm/fault.c
/*
 * Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/magic.h>		/* STACK_END_MAGIC		*/
#include <linux/sched.h>		/* test_thread_flag(), ...	*/
#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
#include <linux/module.h>		/* search_exception_table	*/
#include <linux/bootmem.h>		/* max_low_pfn			*/
#include <linux/kprobes.h>		/* __kprobes, ...		*/
#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
#include <linux/perf_counter.h>		/* perf_swcounter_event		*/

#include <asm/traps.h>			/* dotraplinkage, ...		*/
#include <asm/pgalloc.h>		/* pgd_*(), ...			*/
#include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/
/*
 * Page fault error code bits:
 *
 *   bit 0 == 0: no page found	1: protection fault
 *   bit 1 == 0: read access	1: write access
 *   bit 2 == 0: kernel-mode	1: user-mode access
 *   bit 3 ==			1: use of reserved bit detected
 *   bit 4 ==			1: fault was an instruction fetch
 */
enum x86_pf_error_code {

	PF_PROT		= 1 << 0,
	PF_WRITE	= 1 << 1,
	PF_USER		= 1 << 2,
	PF_RSVD		= 1 << 3,
	PF_INSTR	= 1 << 4,
};
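
/*
 * Example decodings of the bits above: error_code == 0x7
 * (PF_PROT|PF_WRITE|PF_USER) is a user-mode write to a present page
 * whose protections forbid the access, while error_code == 0x0 is a
 * kernel-mode read of a not-present page -- the typical vmalloc-area
 * fault handled lazily further down in this file.
 */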

/*
 * Returns -1 if mmiotrace handled the fault, 0 if mmiotrace is
 * disabled or the fault was not handled by it:
 */
static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, addr) == 1)
			return -1;
	return 0;
}

static inline int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode_vm(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
		      unsigned char opcode, int *prefetch)
{
	unsigned char instr_hi = opcode & 0xf0;
	unsigned char instr_lo = opcode & 0x0f;

	switch (instr_hi) {
	case 0x20:
	case 0x30:
		/*
		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
		 * In X86_64 long mode, the CPU will signal invalid
		 * opcode if some of these prefixes are present so
		 * X86_64 will never get here anyway.
		 */
		return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
	case 0x40:
		/*
		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes.
		 * Need to figure out under what instruction mode the
		 * instruction was issued. Could check the LDT for lm,
		 * but for now it's good enough to assume that long
		 * mode only uses well known segments or kernel.
		 */
		return (!user_mode(regs)) || (regs->cs == __USER_CS);
#endif
	case 0x60:
		/* 0x64 through 0x67 are valid prefixes in all modes. */
		return (instr_lo & 0xC) == 0x4;
	case 0xF0:
		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
		return !instr_lo || (instr_lo>>1) == 1;
	case 0x00:
		/* Prefetch instruction is 0x0F0D or 0x0F18 */
		if (probe_kernel_address(instr, opcode))
			return 0;

		*prefetch = (instr_lo == 0xF) &&
			(opcode == 0x0D || opcode == 0x18);
		return 0;
	default:
		return 0;
	}
}
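
/*
 * For reference: AMD's 3DNow! PREFETCH/PREFETCHW instructions encode
 * as 0x0F 0x0D and the SSE PREFETCHNTA/T0/T1/T2 family as 0x0F 0x18,
 * which is why, after an 0x0F escape byte, only a second byte of 0x0D
 * or 0x18 marks the faulting instruction as a prefetch above.
 */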

static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
	unsigned char *max_instr;
	unsigned char *instr;
	int prefetch = 0;

	/*
	 * If it was an exec (instruction fetch) fault on an NX page, then
	 * do not ignore the fault:
	 */
	if (error_code & PF_INSTR)
		return 0;

	instr = (void *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
		return 0;

	while (instr < max_instr) {
		unsigned char opcode;

		if (probe_kernel_address(instr, opcode))
			break;

		instr++;

		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
			break;
	}
	return prefetch;
}

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo	= si_signo;
	info.si_errno	= 0;
	info.si_code	= si_code;
	info.si_addr	= (void __user *)address;

	force_sig_info(si_signo, &info, tsk);
}
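
/*
 * The si_code passed in here is SEGV_MAPERR when no VMA covers the
 * faulting address, SEGV_ACCERR when a VMA exists but forbids the
 * access, or BUS_ADRERR (with SIGBUS) from do_sigbus() below.
 */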

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */
	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

	return pmd_k;
}

void vmalloc_sync_all(void)
{
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	for (address = VMALLOC_START & PMD_MASK;
	     address >= TASK_SIZE && address < FIXADDR_TOP;
	     address += PMD_SIZE) {

		unsigned long flags;
		struct page *page;

		spin_lock_irqsave(&pgd_lock, flags);
		list_for_each_entry(page, &pgd_list, lru) {
			if (!vmalloc_sync_one(page_address(page), address))
				break;
		}
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}

/*
 * 32-bit:
 *
 *   Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}
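
/*
 * Background on the scheme above: vmalloc()/vmap() install new kernel
 * mappings only in init_mm's page tables, the "reference" tables.
 * Each process has its own pgd, so the first touch of such a mapping
 * from another context faults, and vmalloc_fault() copies the missing
 * top-level entry over, instead of updating every process's pgd at
 * vmalloc() time.
 */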

/*
 * Did it hit the DOS screen memory VA from vm86 mode?
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
	unsigned long bit;

	if (!v8086_mode(regs))
		return;

	bit = (address - 0xA0000) >> PAGE_SHIFT;
	if (bit < 32)
		tsk->thread.screen_bitmap |= 1 << bit;
}
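
/*
 * A note on the arithmetic above: the legacy VGA window starts at
 * 0xA0000 and the bitmap has 32 bits, so with 4K pages it covers
 * 0xA0000-0xBFFFF (128K). screen_bitmap records which of those pages
 * the vm86 task has touched.
 */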

static void dump_pagetable(unsigned long address)
{
	__typeof__(pte_val(__pte(0))) page;

	page = read_cr3();
	page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];

#ifdef CONFIG_X86_PAE
	printk("*pdpt = %016Lx ", page);
	if ((page >> PAGE_SHIFT) < max_low_pfn
	    && page & _PAGE_PRESENT) {
		page &= PAGE_MASK;
		page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
							 & (PTRS_PER_PMD - 1)];
		printk(KERN_CONT "*pde = %016Lx ", page);
		page &= ~_PAGE_NX;
	}
#else
	printk("*pde = %08lx ", page);
#endif

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already:
	 */
	if ((page >> PAGE_SHIFT) < max_low_pfn
	    && (page & _PAGE_PRESENT)
	    && !(page & _PAGE_PSE)) {

		page &= PAGE_MASK;
		page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
							 & (PTRS_PER_PTE - 1)];
		printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
	}

	printk("\n");
}

#else /* CONFIG_X86_64: */

void vmalloc_sync_all(void)
{
	unsigned long address;

	for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
	     address += PGDIR_SIZE) {

		const pgd_t *pgd_ref = pgd_offset_k(address);
		unsigned long flags;
		struct page *page;

		if (pgd_none(*pgd_ref))
			continue;

		spin_lock_irqsave(&pgd_lock, flags);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);
			else
				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
		}
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}

/*
 * 64-bit:
 *
 *   Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Copy kernel mappings over when needed. This can also
	 * happen within a race in page table update. In the latter
	 * case just flush:
	 */
	pgd = pgd_offset(current->active_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;

	if (pgd_none(*pgd))
		set_pgd(pgd, *pgd_ref);
	else
		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

	/*
	 * Below here mismatches are bugs because these lower tables
	 * are shared:
	 */

	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;

	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
		BUG();

	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;

	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();

	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;

	pte = pte_offset_kernel(pmd, address);

	/*
	 * Don't use pte_page here, because the mappings can point
	 * outside mem_map, and the NUMA hash lookup cannot handle
	 * that:
	 */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();

	return 0;
}
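
/*
 * Contrast with the 32-bit path above: on 64-bit only the top-level
 * pgd entry ever differs between processes for kernel addresses; the
 * pud/pmd/pte levels hang off that shared entry, which is why any
 * mismatch below the pgd is treated as a hard BUG() rather than
 * something to repair.
 */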

static const char errata93_warning[] =
KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
KERN_ERR "******* Please consider a BIOS update.\n"
KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";

/*
 * No vm86 mode in 64-bit mode:
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = (pgd_t *)read_cr3();

	pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);

	pgd += pgd_index(address);
	if (bad_address(pgd))
		goto bad;

	printk("PGD %lx ", pgd_val(*pgd));

	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (bad_address(pud))
		goto bad;

	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd))
		goto bad;

	printk("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte))
		goto bad;

	printk("PTE %lx", pte_val(*pte));
out:
	printk("\n");
	return;
bad:
	printk("BAD\n");
}

#endif /* CONFIG_X86_64 */

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64bit RIP register on C stepping K8.
 *
 * A lot of BIOSes that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		printk_once(errata93_warning);
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}

/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return. Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
		return 1;
#endif
	return 0;
}

static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	unsigned long nr;

	/*
	 * Pentium F0 0F C7 C8 bug workaround:
	 */
	if (boot_cpu_data.f00f_bug) {
		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return 1;
		}
	}
#endif
	return 0;
}
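
/*
 * Background: the Pentium F00F erratum lets user space hang the CPU
 * with the invalid "lock cmpxchg8b" encoding F0 0F C7 C8. The
 * workaround points the IDT descriptor at a read-only alias of the
 * table, so the CPU's attempt to walk it arrives here as a page
 * fault; entry 6 (each IDT entry is 8 bytes, hence the >> 3) is #UD,
 * which we then deliver by hand via do_invalid_op().
 */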

static const char nx_warning[] = KERN_CRIT
"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";

static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	if (!oops_may_print())
		return;

	if (error_code & PF_INSTR) {
		unsigned int level;

		pte_t *pte = lookup_address(address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			printk(nx_warning, current_uid());
	}

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %p\n", (void *) address);
	printk(KERN_ALERT "IP:");
	printk_address(regs->ip, 1);

	dump_pagetable(address);
}

static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
	    unsigned long address)
{
	struct task_struct *tsk;
	unsigned long flags;
	int sig;

	flags = oops_begin();
	tsk = current;
	sig = SIGKILL;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       tsk->comm, address);
	dump_pagetable(address);

	tsk->thread.cr2		= address;
	tsk->thread.trap_no	= 14;
	tsk->thread.error_code	= error_code;

	if (__die("Bad pagetable", regs, error_code))
		sig = 0;

	oops_end(flags, regs, sig);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address)
{
	struct task_struct *tsk = current;
	unsigned long *stackend;
	unsigned long flags;
	int sig;

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * 32-bit:
	 *
	 *   Valid to do another page fault here, because if this fault
	 *   had been triggered by is_prefetch fixup_exception would have
	 *   handled it.
	 *
	 * 64-bit:
	 *
	 *   Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	if (is_errata93(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice:
	 */
	flags = oops_begin();

	show_fault_oops(regs, error_code, address);

	stackend = end_of_stack(tsk);
	if (*stackend != STACK_END_MAGIC)
		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

	tsk->thread.cr2		= address;
	tsk->thread.trap_no	= 14;
	tsk->thread.error_code	= error_code;

	sig = SIGKILL;
	if (__die("Oops", regs, error_code))
		sig = 0;

	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_EMERG "CR2: %016lx\n", address);

	oops_end(flags, regs, sig);
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	printk(KERN_CONT "%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
		tsk->comm, task_pid_nr(tsk), address,
		(void *)regs->ip, (void *)regs->sp, error_code);

	print_vma_addr(KERN_CONT " in ", regs->ip);

	printk(KERN_CONT "\n");
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space:
		 */
		if (is_prefetch(regs, error_code, address))
			return;

		if (is_errata100(regs, address))
			return;

		if (unlikely(show_unhandled_signals))
			show_signal_msg(regs, error_code, address, tsk);

		/* Kernel addresses are always protection faults: */
		tsk->thread.cr2		= address;
		tsk->thread.error_code	= error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no	= 14;

		force_sig_info_fault(SIGSEGV, si_code, address, tsk);

		return;
	}

	if (is_f00f_bug(regs, address))
		return;

	no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */
static void
out_of_memory(struct pt_regs *regs, unsigned long error_code,
	      unsigned long address)
{
	/*
	 * We ran out of memory, call the OOM killer, and return to userspace
	 * (which will retry the fault, or kill us if we got oom-killed):
	 */
	up_read(&current->mm->mmap_sem);

	pagefault_out_of_memory();
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!(error_code & PF_USER))
		no_context(regs, error_code, address);

	/* User-space => ok to do another page fault: */
	if (is_prefetch(regs, error_code, address))
		return;

	tsk->thread.cr2		= address;
	tsk->thread.error_code	= error_code;
	tsk->thread.trap_no	= 14;

	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}

static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, unsigned int fault)
{
	if (fault & VM_FAULT_OOM) {
		out_of_memory(regs, error_code, address);
	} else {
		if (fault & VM_FAULT_SIGBUS)
			do_sigbus(regs, error_code, address);
		else
			BUG();
	}
}

static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & PF_WRITE) && !pte_write(*pte))
		return 0;

	if ((error_code & PF_INSTR) && !pte_exec(*pte))
		return 0;

	return 1;
}

/*
 * Handle a spurious fault caused by a stale TLB entry.
 *
 * This allows us to lazily refresh the TLB when increasing the
 * permissions of a kernel page (RO -> RW or NX -> X). Doing it
 * eagerly is very expensive since that implies doing a full
 * cross-processor TLB flush, even if no stale TLB entries exist
 * on other processors.
 *
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 */
static noinline int
spurious_fault(unsigned long error_code, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	/* Reserved-bit violation or user access to kernel space? */
	if (error_code & (PF_USER | PF_RSVD))
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return 0;

	if (pud_large(*pud))
		return spurious_fault_check(error_code, (pte_t *) pud);

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return spurious_fault_check(error_code, (pte_t *) pmd);

	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return 0;

	ret = spurious_fault_check(error_code, pte);
	if (!ret)
		return 0;

	/*
	 * Make sure we have permissions in PMD.
	 * If not, then there's a bug in the page tables:
	 */
	ret = spurious_fault_check(error_code, (pte_t *) pmd);
	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");

	return ret;
}
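
/*
 * Concrete example of the case handled above: one CPU upgrades a
 * kernel page from read-only to read-write but skips the expensive
 * cross-processor TLB shootdown. Another CPU still holding the stale
 * read-only translation traps on its first write. The page tables
 * already permit the write, so the fault is spurious, and returning
 * simply retries the access with a refreshed TLB entry.
 */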

int show_unhandled_signals = 1;

static inline int
access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
{
	if (write) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* read, present: */
	if (unlikely(error_code & PF_PROT))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}
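
/*
 * For instance, a write fault in a VMA mapped PROT_READ only returns 1
 * here and becomes SEGV_ACCERR, and a read fault in a PROT_NONE
 * mapping (no VM_READ/VM_WRITE/VM_EXEC bits) is likewise rejected;
 * an ordinary read of a not-yet-populated read-write mapping returns 0
 * and proceeds to handle_mm_fault().
 */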

static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE_MAX;
}
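
/*
 * TASK_SIZE_MAX is the end of the user address range, so everything
 * at or above it -- the vmalloc area, module space, and the kernel
 * image itself -- is treated as kernel space by the fast path below.
 */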

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
dotraplinkage void __kprobes
do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	unsigned long address;
	struct mm_struct *mm;
	int write;
	int fault;

	tsk = current;
	mm = tsk->mm;

	/* Get the faulting address: */
	address = read_cr2();

	/*
	 * Detect and handle instructions that would cause a page fault for
	 * both a tracked kernel page and a userspace page.
	 */
	if (kmemcheck_active(regs))
		kmemcheck_hide(regs);
	prefetchw(&mm->mmap_sem);

	if (unlikely(kmmio_fault(regs, address)))
		return;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * ((error_code & PF_USER) == 0), and that the fault was not a
	 * protection or reserved-bit error
	 * ((error_code & (PF_PROT | PF_RSVD)) == 0).
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
			if (vmalloc_fault(address) >= 0)
				return;

			if (kmemcheck_fault(regs, address, error_code))
				return;
		}

		/* Can handle a stale RO->RW TLB: */
		if (spurious_fault(error_code, address))
			return;

		/* kprobes don't want to hook the spurious faults: */
		if (notify_page_fault(regs))
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock:
		 */
		bad_area_nosemaphore(regs, error_code, address);

		return;
	}

	/* kprobes don't want to hook the spurious faults: */
	if (unlikely(notify_page_fault(regs)))
		return;
	/*
	 * It's safe to allow irq's after cr2 has been saved and the
	 * vmalloc fault has been handled.
	 *
	 * User-mode registers count as a user access even for any
	 * potential system fault or CPU buglet:
	 */
	if (user_mode_vm(regs)) {
		local_irq_enable();
		error_code |= PF_USER;
	} else {
		if (regs->flags & X86_EFLAGS_IF)
			local_irq_enable();
	}

	if (unlikely(error_code & PF_RSVD))
		pgtable_bad(regs, error_code, address);

	perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in an atomic region then we must not take the fault:
	 */
	if (unlikely(in_atomic() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space. All other faults represent errors in
	 * the kernel and should generate an OOPS. Unfortunately, in the
	 * case of an erroneous fault occurring in a code path which already
	 * holds mmap_sem we will deadlock attempting to validate the fault
	 * against the address space. Luckily the kernel only validly
	 * references user space from well defined areas of code, which are
	 * listed in the exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a
	 * deadlock. Attempt to lock the address space; if we cannot, we then
	 * validate the source. If this is invalid we can skip the address
	 * space check, thus avoiding the deadlock:
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if ((error_code & PF_USER) == 0 &&
		    !search_exception_tables(regs->ip)) {
			bad_area_nosemaphore(regs, error_code, address);
			return;
		}
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (error_code & PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work. ("enter $65535, $31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
		if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
			bad_area(regs, error_code, address);
			return;
		}
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	write = error_code & PF_WRITE;

	if (unlikely(access_error(error_code, write, vma))) {
		bad_area_access_error(regs, error_code, address);
		return;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault:
	 */
	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);

	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, error_code, address, fault);
		return;
	}

	if (fault & VM_FAULT_MAJOR) {
		tsk->maj_flt++;
		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
				     regs, address);
	} else {
		tsk->min_flt++;
		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
				     regs, address);
	}

	check_v8086_mode(regs, address, tsk);

	up_read(&mm->mmap_sem);
}