Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
1da177e4 | 2 | * Copyright (C) 1995 Linus Torvalds |
2d4a7167 | 3 | * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs. |
f8eeb2e6 | 4 | * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar |
1da177e4 | 5 | */ |
a2bcd473 IM |
6 | #include <linux/magic.h> /* STACK_END_MAGIC */ |
7 | #include <linux/sched.h> /* test_thread_flag(), ... */ | |
8 | #include <linux/kdebug.h> /* oops_begin/end, ... */ | |
9 | #include <linux/module.h> /* search_exception_table */ | |
10 | #include <linux/bootmem.h> /* max_low_pfn */ | |
11 | #include <linux/kprobes.h> /* __kprobes, ... */ | |
12 | #include <linux/mmiotrace.h> /* kmmio_handler, ... */ | |
cdd6c482 | 13 | #include <linux/perf_event.h> /* perf_sw_event */ |
f672b49b | 14 | #include <linux/hugetlb.h> /* hstate_index_to_shift */ |
2d4a7167 | 15 | |
a2bcd473 IM |
16 | #include <asm/traps.h> /* dotraplinkage, ... */ |
17 | #include <asm/pgalloc.h> /* pgd_*(), ... */ | |
f8561296 | 18 | #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */ |
1da177e4 | 19 | |
33cb5243 | 20 | /* |
2d4a7167 IM |
21 | * Page fault error code bits: |
22 | * | |
23 | * bit 0 == 0: no page found 1: protection fault | |
24 | * bit 1 == 0: read access 1: write access | |
25 | * bit 2 == 0: kernel-mode access 1: user-mode access | |
26 | * bit 3 == 1: use of reserved bit detected | |
27 | * bit 4 == 1: fault was an instruction fetch | |
33cb5243 | 28 | */ |
2d4a7167 IM |
29 | enum x86_pf_error_code { |
30 | ||
31 | PF_PROT = 1 << 0, | |
32 | PF_WRITE = 1 << 1, | |
33 | PF_USER = 1 << 2, | |
34 | PF_RSVD = 1 << 3, | |
35 | PF_INSTR = 1 << 4, | |
36 | }; | |
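
These PF_* bits come straight from the hardware error code that the CPU pushes on a page fault, and they combine additively. As a minimal, standalone illustration (user-space C, not part of fault.c), decoding a typical value such as 0x6 -- a user-mode write to a not-present page -- looks like this:

```c
/* Standalone sketch: decode a page-fault error code into the bits above. */
#include <stdio.h>

#define PF_PROT  (1 << 0)
#define PF_WRITE (1 << 1)
#define PF_USER  (1 << 2)
#define PF_RSVD  (1 << 3)
#define PF_INSTR (1 << 4)

static void decode_pf_error(unsigned long error_code)
{
	printf("error_code=%#lx: %s, %s, %s%s%s\n", error_code,
	       (error_code & PF_PROT)  ? "protection fault" : "no page found",
	       (error_code & PF_WRITE) ? "write access"     : "read access",
	       (error_code & PF_USER)  ? "user-mode access" : "kernel-mode access",
	       (error_code & PF_RSVD)  ? ", reserved bit set"  : "",
	       (error_code & PF_INSTR) ? ", instruction fetch" : "");
}

int main(void)
{
	decode_pf_error(0x6);	/* user-mode write to a not-present page */
	decode_pf_error(0x7);	/* user-mode write protection fault (e.g. COW) */
	return 0;
}
```
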
66c58156 | 37 | |
b814d41f | 38 | /* |
b319eed0 IM |
39 | * Returns 0 if mmiotrace is disabled, or if the fault is not |
40 | * handled by mmiotrace: | |
b814d41f | 41 | */ |
62c9295f MH |
42 | static inline int __kprobes |
43 | kmmio_fault(struct pt_regs *regs, unsigned long addr) | |
86069782 | 44 | { |
0fd0e3da PP |
45 | if (unlikely(is_kmmio_active())) |
46 | if (kmmio_handler(regs, addr) == 1) | |
47 | return -1; | |
0fd0e3da | 48 | return 0; |
86069782 PP |
49 | } |
50 | ||
62c9295f | 51 | static inline int __kprobes notify_page_fault(struct pt_regs *regs) |
1bd858a5 | 52 | { |
74a0b576 CH |
53 | int ret = 0; |
54 | ||
55 | /* kprobe_running() needs smp_processor_id() */ | |
b1801812 | 56 | if (kprobes_built_in() && !user_mode_vm(regs)) { |
74a0b576 CH |
57 | preempt_disable(); |
58 | if (kprobe_running() && kprobe_fault_handler(regs, 14)) | |
59 | ret = 1; | |
60 | preempt_enable(); | |
61 | } | |
1bd858a5 | 62 | |
74a0b576 | 63 | return ret; |
33cb5243 | 64 | } |
1bd858a5 | 65 | |
1dc85be0 | 66 | /* |
2d4a7167 IM |
67 | * Prefetch quirks: |
68 | * | |
69 | * 32-bit mode: | |
70 | * | |
71 | * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch. | |
72 | * Check that here and ignore it. | |
1dc85be0 | 73 | * |
2d4a7167 | 74 | * 64-bit mode: |
1dc85be0 | 75 | * |
2d4a7167 IM |
76 | * Sometimes the CPU reports invalid exceptions on prefetch. |
77 | * Check that here and ignore it. | |
78 | * | |
79 | * Opcode checker based on code by Richard Brunner. | |
1dc85be0 | 80 | */ |
107a0367 IM |
81 | static inline int |
82 | check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr, | |
83 | unsigned char opcode, int *prefetch) | |
84 | { | |
85 | unsigned char instr_hi = opcode & 0xf0; | |
86 | unsigned char instr_lo = opcode & 0x0f; | |
87 | ||
88 | switch (instr_hi) { | |
89 | case 0x20: | |
90 | case 0x30: | |
91 | /* | |
92 | * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. | |
93 | * In X86_64 long mode, the CPU will signal invalid | |
94 | * opcode if some of these prefixes are present so | |
95 | * X86_64 will never get here anyway | |
96 | */ | |
97 | return ((instr_lo & 7) == 0x6); | |
98 | #ifdef CONFIG_X86_64 | |
99 | case 0x40: | |
100 | /* | |
101 | * In AMD64 long mode 0x40..0x4F are valid REX prefixes | |
102 | * Need to figure out under what instruction mode the | |
103 | * instruction was issued. Could check the LDT for lm, | |
104 | * but for now it's good enough to assume that long | |
105 | * mode only uses well known segments or kernel. | |
106 | */ | |
107 | return (!user_mode(regs)) || (regs->cs == __USER_CS); | |
108 | #endif | |
109 | case 0x60: | |
110 | /* 0x64 thru 0x67 are valid prefixes in all modes. */ | |
111 | return (instr_lo & 0xC) == 0x4; | |
112 | case 0xF0: | |
113 | /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */ | |
114 | return !instr_lo || (instr_lo>>1) == 1; | |
115 | case 0x00: | |
116 | /* Prefetch instruction is 0x0F0D or 0x0F18 */ | |
117 | if (probe_kernel_address(instr, opcode)) | |
118 | return 0; | |
119 | ||
120 | *prefetch = (instr_lo == 0xF) && | |
121 | (opcode == 0x0D || opcode == 0x18); | |
122 | return 0; | |
123 | default: | |
124 | return 0; | |
125 | } | |
126 | } | |
127 | ||
2d4a7167 IM |
128 | static int |
129 | is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) | |
33cb5243 | 130 | { |
2d4a7167 | 131 | unsigned char *max_instr; |
ab2bf0c1 | 132 | unsigned char *instr; |
33cb5243 | 133 | int prefetch = 0; |
1da177e4 | 134 | |
3085354d IM |
135 | /* |
136 | * If it was an exec (instruction fetch) fault on NX page, then | |
137 | * do not ignore the fault: | |
138 | */ | |
66c58156 | 139 | if (error_code & PF_INSTR) |
1da177e4 | 140 | return 0; |
1dc85be0 | 141 | |
107a0367 | 142 | instr = (void *)convert_ip_to_linear(current, regs); |
f1290ec9 | 143 | max_instr = instr + 15; |
1da177e4 | 144 | |
76381fee | 145 | if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE) |
1da177e4 LT |
146 | return 0; |
147 | ||
107a0367 | 148 | while (instr < max_instr) { |
2d4a7167 | 149 | unsigned char opcode; |
1da177e4 | 150 | |
ab2bf0c1 | 151 | if (probe_kernel_address(instr, opcode)) |
33cb5243 | 152 | break; |
1da177e4 | 153 | |
1da177e4 LT |
154 | instr++; |
155 | ||
107a0367 | 156 | if (!check_prefetch_opcode(regs, instr, opcode, &prefetch)) |
1da177e4 | 157 | break; |
1da177e4 LT |
158 | } |
159 | return prefetch; | |
160 | } | |
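
is_prefetch() walks at most 15 opcode bytes (fetched with probe_kernel_address()) and asks check_prefetch_opcode() whether each byte is a legal prefix, stopping once it sees the AMD prefetch encodings 0x0F 0x0D (PREFETCH/PREFETCHW) or 0x0F 0x18 (PREFETCHh). The standalone sketch below mimics that scan on a raw byte buffer; it is simplified, assumes a 64-bit target, and skips the kernel's mode-dependent handling of REX and segment prefixes:

```c
/* Standalone sketch (user space): recognize the prefetch encodings that
 * is_prefetch() looks for, after skipping common instruction prefixes.
 * Simplified: no 15-byte limit, no user/kernel mode distinction. */
#include <stdio.h>
#include <stddef.h>

static int looks_like_prefetch(const unsigned char *instr, size_t len)
{
	size_t i = 0;

	while (i < len) {
		unsigned char b = instr[i];

		if (b == 0x26 || b == 0x2E || b == 0x36 || b == 0x3E || /* segment overrides */
		    b == 0x64 || b == 0x65 || b == 0x66 || b == 0x67 || /* FS/GS, opsize, addrsize */
		    b == 0xF0 || b == 0xF2 || b == 0xF3 ||              /* LOCK, REPNE, REP */
		    (b >= 0x40 && b <= 0x4F))                           /* REX (64-bit mode) */
			i++;
		else
			break;
	}

	return (i + 1 < len) && instr[i] == 0x0F &&
	       (instr[i + 1] == 0x0D || instr[i + 1] == 0x18);
}

int main(void)
{
	const unsigned char prefetchw[] = { 0x0F, 0x0D, 0x09 }; /* prefetchw (%rcx) */
	const unsigned char plain_mov[] = { 0x48, 0x89, 0xC8 }; /* mov %rcx,%rax */

	printf("%d %d\n", looks_like_prefetch(prefetchw, sizeof(prefetchw)),
	       looks_like_prefetch(plain_mov, sizeof(plain_mov)));   /* prints: 1 0 */
	return 0;
}
```
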
161 | ||
2d4a7167 IM |
162 | static void |
163 | force_sig_info_fault(int si_signo, int si_code, unsigned long address, | |
f672b49b | 164 | struct task_struct *tsk, int fault) |
c4aba4a8 | 165 | { |
f672b49b | 166 | unsigned lsb = 0; |
c4aba4a8 HH |
167 | siginfo_t info; |
168 | ||
2d4a7167 IM |
169 | info.si_signo = si_signo; |
170 | info.si_errno = 0; | |
171 | info.si_code = si_code; | |
172 | info.si_addr = (void __user *)address; | |
f672b49b AK |
173 | if (fault & VM_FAULT_HWPOISON_LARGE) |
174 | lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); | |
175 | if (fault & VM_FAULT_HWPOISON) | |
176 | lsb = PAGE_SHIFT; | |
177 | info.si_addr_lsb = lsb; | |
2d4a7167 | 178 | |
c4aba4a8 HH |
179 | force_sig_info(si_signo, &info, tsk); |
180 | } | |
181 | ||
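
For hardware-poisoned pages, force_sig_info_fault() also fills in si_addr_lsb, which tells the signal handler how large a region around si_addr is affected (PAGE_SHIFT for a normal page, the hstate shift for a huge page). A rough standalone illustration of what those shifts translate to in bytes; the 2 MiB value is an assumed example, not read from hstate_index_to_shift():

```c
/* Standalone sketch: what si_addr_lsb implies about the poisoned range. */
#include <stdio.h>

int main(void)
{
	unsigned int lsb_base = 12;	/* PAGE_SHIFT: normal 4 KiB page     */
	unsigned int lsb_huge = 21;	/* e.g. a 2 MiB huge page (assumed)  */

	printf("lsb %u -> %lu bytes around si_addr\n", lsb_base, 1UL << lsb_base);
	printf("lsb %u -> %lu bytes around si_addr\n", lsb_huge, 1UL << lsb_huge);
	return 0;
}
```
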
f2f13a85 IM |
182 | DEFINE_SPINLOCK(pgd_lock); |
183 | LIST_HEAD(pgd_list); | |
184 | ||
185 | #ifdef CONFIG_X86_32 | |
186 | static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) | |
33cb5243 | 187 | { |
f2f13a85 IM |
188 | unsigned index = pgd_index(address); |
189 | pgd_t *pgd_k; | |
190 | pud_t *pud, *pud_k; | |
191 | pmd_t *pmd, *pmd_k; | |
2d4a7167 | 192 | |
f2f13a85 IM |
193 | pgd += index; |
194 | pgd_k = init_mm.pgd + index; | |
195 | ||
196 | if (!pgd_present(*pgd_k)) | |
197 | return NULL; | |
198 | ||
199 | /* | |
200 | * set_pgd(pgd, *pgd_k); here would be useless on PAE | |
201 | * and redundant with the set_pmd() on non-PAE. As would | |
202 | * set_pud. | |
203 | */ | |
204 | pud = pud_offset(pgd, address); | |
205 | pud_k = pud_offset(pgd_k, address); | |
206 | if (!pud_present(*pud_k)) | |
207 | return NULL; | |
208 | ||
209 | pmd = pmd_offset(pud, address); | |
210 | pmd_k = pmd_offset(pud_k, address); | |
211 | if (!pmd_present(*pmd_k)) | |
212 | return NULL; | |
213 | ||
b8bcfe99 | 214 | if (!pmd_present(*pmd)) |
f2f13a85 | 215 | set_pmd(pmd, *pmd_k); |
b8bcfe99 | 216 | else |
f2f13a85 | 217 | BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); |
f2f13a85 IM |
218 | |
219 | return pmd_k; | |
220 | } | |
221 | ||
222 | void vmalloc_sync_all(void) | |
223 | { | |
224 | unsigned long address; | |
225 | ||
226 | if (SHARED_KERNEL_PMD) | |
227 | return; | |
228 | ||
229 | for (address = VMALLOC_START & PMD_MASK; | |
230 | address >= TASK_SIZE && address < FIXADDR_TOP; | |
231 | address += PMD_SIZE) { | |
232 | ||
233 | unsigned long flags; | |
234 | struct page *page; | |
235 | ||
236 | spin_lock_irqsave(&pgd_lock, flags); | |
237 | list_for_each_entry(page, &pgd_list, lru) { | |
617d34d9 | 238 | spinlock_t *pgt_lock; |
f01f7c56 | 239 | pmd_t *ret; |
617d34d9 JF |
240 | |
241 | pgt_lock = &pgd_page_get_mm(page)->page_table_lock; | |
242 | ||
243 | spin_lock(pgt_lock); | |
244 | ret = vmalloc_sync_one(page_address(page), address); | |
245 | spin_unlock(pgt_lock); | |
246 | ||
247 | if (!ret) | |
f2f13a85 IM |
248 | break; |
249 | } | |
250 | spin_unlock_irqrestore(&pgd_lock, flags); | |
251 | } | |
252 | } | |
253 | ||
254 | /* | |
255 | * 32-bit: | |
256 | * | |
257 | * Handle a fault on the vmalloc or module mapping area | |
258 | */ | |
62c9295f | 259 | static noinline __kprobes int vmalloc_fault(unsigned long address) |
f2f13a85 IM |
260 | { |
261 | unsigned long pgd_paddr; | |
262 | pmd_t *pmd_k; | |
263 | pte_t *pte_k; | |
264 | ||
265 | /* Make sure we are in vmalloc area: */ | |
266 | if (!(address >= VMALLOC_START && address < VMALLOC_END)) | |
267 | return -1; | |
268 | ||
ebc8827f FW |
269 | WARN_ON_ONCE(in_nmi()); |
270 | ||
f2f13a85 IM |
271 | /* |
272 | * Synchronize this task's top level page-table | |
273 | * with the 'reference' page table. | |
274 | * | |
275 | * Do _not_ use "current" here. We might be inside | |
276 | * an interrupt in the middle of a task switch.. | |
277 | */ | |
278 | pgd_paddr = read_cr3(); | |
279 | pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); | |
280 | if (!pmd_k) | |
281 | return -1; | |
282 | ||
283 | pte_k = pte_offset_kernel(pmd_k, address); | |
284 | if (!pte_present(*pte_k)) | |
285 | return -1; | |
286 | ||
287 | return 0; | |
288 | } | |
289 | ||
290 | /* | |
291 | * Did it hit the DOS screen memory VA from vm86 mode? | |
292 | */ | |
293 | static inline void | |
294 | check_v8086_mode(struct pt_regs *regs, unsigned long address, | |
295 | struct task_struct *tsk) | |
296 | { | |
297 | unsigned long bit; | |
298 | ||
299 | if (!v8086_mode(regs)) | |
300 | return; | |
301 | ||
302 | bit = (address - 0xA0000) >> PAGE_SHIFT; | |
303 | if (bit < 32) | |
304 | tsk->thread.screen_bitmap |= 1 << bit; | |
33cb5243 | 305 | } |
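
check_v8086_mode() maps each 4 KiB page of the legacy DOS/VGA window starting at 0xA0000 to one bit of thread.screen_bitmap (32 pages cover 0xA0000-0xBFFFF). A quick standalone check of that arithmetic for a fault in text-mode video memory:

```c
/* Standalone sketch: which screen_bitmap bit a vm86 fault at 0xB8000 sets. */
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long address = 0xB8000;	/* classic text-mode VGA page */
	unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;

	if (bit < 32)
		printf("address %#lx -> screen_bitmap bit %lu\n", address, bit); /* bit 24 */
	return 0;
}
```
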
1da177e4 | 306 | |
087975b0 | 307 | static bool low_pfn(unsigned long pfn) |
1da177e4 | 308 | { |
087975b0 AM |
309 | return pfn < max_low_pfn; |
310 | } | |
1156e098 | 311 | |
087975b0 AM |
312 | static void dump_pagetable(unsigned long address) |
313 | { | |
314 | pgd_t *base = __va(read_cr3()); | |
315 | pgd_t *pgd = &base[pgd_index(address)]; | |
316 | pmd_t *pmd; | |
317 | pte_t *pte; | |
2d4a7167 | 318 | |
1156e098 | 319 | #ifdef CONFIG_X86_PAE |
087975b0 AM |
320 | printk("*pdpt = %016Lx ", pgd_val(*pgd)); |
321 | if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd)) | |
322 | goto out; | |
1156e098 | 323 | #endif |
087975b0 AM |
324 | pmd = pmd_offset(pud_offset(pgd, address), address); |
325 | printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd)); | |
1156e098 HH |
326 | |
327 | /* | |
328 | * We must not directly access the pte in the highpte | |
329 | * case if the page table is located in highmem. | |
330 | * And let's rather not kmap-atomic the pte, just in case | |
2d4a7167 | 331 | * it's allocated already: |
1156e098 | 332 | */ |
087975b0 AM |
333 | if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd)) |
334 | goto out; | |
1156e098 | 335 | |
087975b0 AM |
336 | pte = pte_offset_kernel(pmd, address); |
337 | printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte)); | |
338 | out: | |
1156e098 | 339 | printk("\n"); |
f2f13a85 IM |
340 | } |
341 | ||
342 | #else /* CONFIG_X86_64: */ | |
343 | ||
344 | void vmalloc_sync_all(void) | |
345 | { | |
6afb5157 | 346 | sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END); |
f2f13a85 IM |
347 | } |
348 | ||
349 | /* | |
350 | * 64-bit: | |
351 | * | |
352 | * Handle a fault on the vmalloc area | |
353 | * | |
354 | * This assumes no large pages in there. | |
355 | */ | |
62c9295f | 356 | static noinline __kprobes int vmalloc_fault(unsigned long address) |
f2f13a85 IM |
357 | { |
358 | pgd_t *pgd, *pgd_ref; | |
359 | pud_t *pud, *pud_ref; | |
360 | pmd_t *pmd, *pmd_ref; | |
361 | pte_t *pte, *pte_ref; | |
362 | ||
363 | /* Make sure we are in vmalloc area: */ | |
364 | if (!(address >= VMALLOC_START && address < VMALLOC_END)) | |
365 | return -1; | |
366 | ||
ebc8827f FW |
367 | WARN_ON_ONCE(in_nmi()); |
368 | ||
f2f13a85 IM |
369 | /* |
370 | * Copy kernel mappings over when needed. This can also | |
371 | * happen within a race in page table update. In the latter | |
372 | * case just flush: | |
373 | */ | |
374 | pgd = pgd_offset(current->active_mm, address); | |
375 | pgd_ref = pgd_offset_k(address); | |
376 | if (pgd_none(*pgd_ref)) | |
377 | return -1; | |
378 | ||
379 | if (pgd_none(*pgd)) | |
380 | set_pgd(pgd, *pgd_ref); | |
381 | else | |
382 | BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); | |
383 | ||
384 | /* | |
385 | * Below here mismatches are bugs because these lower tables | |
386 | * are shared: | |
387 | */ | |
388 | ||
389 | pud = pud_offset(pgd, address); | |
390 | pud_ref = pud_offset(pgd_ref, address); | |
391 | if (pud_none(*pud_ref)) | |
392 | return -1; | |
393 | ||
394 | if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref)) | |
395 | BUG(); | |
396 | ||
397 | pmd = pmd_offset(pud, address); | |
398 | pmd_ref = pmd_offset(pud_ref, address); | |
399 | if (pmd_none(*pmd_ref)) | |
400 | return -1; | |
401 | ||
402 | if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref)) | |
403 | BUG(); | |
404 | ||
405 | pte_ref = pte_offset_kernel(pmd_ref, address); | |
406 | if (!pte_present(*pte_ref)) | |
407 | return -1; | |
408 | ||
409 | pte = pte_offset_kernel(pmd, address); | |
410 | ||
411 | /* | |
412 | * Don't use pte_page here, because the mappings can point | |
413 | * outside mem_map, and the NUMA hash lookup cannot handle | |
414 | * that: | |
415 | */ | |
416 | if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref)) | |
417 | BUG(); | |
418 | ||
419 | return 0; | |
420 | } | |
421 | ||
422 | static const char errata93_warning[] = | |
ad361c98 JP |
423 | KERN_ERR |
424 | "******* Your BIOS seems to not contain a fix for K8 errata #93\n" | |
425 | "******* Working around it, but it may cause SEGVs or burn power.\n" | |
426 | "******* Please consider a BIOS update.\n" | |
427 | "******* Disabling USB legacy in the BIOS may also help.\n"; | |
f2f13a85 IM |
428 | |
429 | /* | |
430 | * No vm86 mode in 64-bit mode: | |
431 | */ | |
432 | static inline void | |
433 | check_v8086_mode(struct pt_regs *regs, unsigned long address, | |
434 | struct task_struct *tsk) | |
435 | { | |
436 | } | |
437 | ||
438 | static int bad_address(void *p) | |
439 | { | |
440 | unsigned long dummy; | |
441 | ||
442 | return probe_kernel_address((unsigned long *)p, dummy); | |
443 | } | |
444 | ||
445 | static void dump_pagetable(unsigned long address) | |
446 | { | |
087975b0 AM |
447 | pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK); |
448 | pgd_t *pgd = base + pgd_index(address); | |
1da177e4 LT |
449 | pud_t *pud; |
450 | pmd_t *pmd; | |
451 | pte_t *pte; | |
452 | ||
2d4a7167 IM |
453 | if (bad_address(pgd)) |
454 | goto bad; | |
455 | ||
d646bce4 | 456 | printk("PGD %lx ", pgd_val(*pgd)); |
2d4a7167 IM |
457 | |
458 | if (!pgd_present(*pgd)) | |
459 | goto out; | |
1da177e4 | 460 | |
d2ae5b5f | 461 | pud = pud_offset(pgd, address); |
2d4a7167 IM |
462 | if (bad_address(pud)) |
463 | goto bad; | |
464 | ||
1da177e4 | 465 | printk("PUD %lx ", pud_val(*pud)); |
b5360222 | 466 | if (!pud_present(*pud) || pud_large(*pud)) |
2d4a7167 | 467 | goto out; |
1da177e4 LT |
468 | |
469 | pmd = pmd_offset(pud, address); | |
2d4a7167 IM |
470 | if (bad_address(pmd)) |
471 | goto bad; | |
472 | ||
1da177e4 | 473 | printk("PMD %lx ", pmd_val(*pmd)); |
2d4a7167 IM |
474 | if (!pmd_present(*pmd) || pmd_large(*pmd)) |
475 | goto out; | |
1da177e4 LT |
476 | |
477 | pte = pte_offset_kernel(pmd, address); | |
2d4a7167 IM |
478 | if (bad_address(pte)) |
479 | goto bad; | |
480 | ||
33cb5243 | 481 | printk("PTE %lx", pte_val(*pte)); |
2d4a7167 | 482 | out: |
1da177e4 LT |
483 | printk("\n"); |
484 | return; | |
485 | bad: | |
486 | printk("BAD\n"); | |
8c938f9f IM |
487 | } |
488 | ||
f2f13a85 | 489 | #endif /* CONFIG_X86_64 */ |
1da177e4 | 490 | |
2d4a7167 IM |
491 | /* |
492 | * Workaround for K8 erratum #93 & buggy BIOS. | |
493 | * | |
494 | * BIOS SMM functions are required to use a specific workaround | |
495 | * to avoid corruption of the 64bit RIP register on C stepping K8. | |
496 | * | |
497 | * A lot of BIOS that didn't get tested properly miss this. | |
498 | * | |
499 | * The OS sees this as a page fault with the upper 32bits of RIP cleared. | |
500 | * Try to work around it here. | |
501 | * | |
502 | * Note we only handle faults in kernel here. | |
503 | * Does nothing on 32-bit. | |
fdfe8aa8 | 504 | */ |
33cb5243 | 505 | static int is_errata93(struct pt_regs *regs, unsigned long address) |
1da177e4 | 506 | { |
fdfe8aa8 | 507 | #ifdef CONFIG_X86_64 |
65ea5b03 | 508 | if (address != regs->ip) |
1da177e4 | 509 | return 0; |
2d4a7167 | 510 | |
33cb5243 | 511 | if ((address >> 32) != 0) |
1da177e4 | 512 | return 0; |
2d4a7167 | 513 | |
1da177e4 | 514 | address |= 0xffffffffUL << 32; |
33cb5243 HH |
515 | if ((address >= (u64)_stext && address <= (u64)_etext) || |
516 | (address >= MODULES_VADDR && address <= MODULES_END)) { | |
a454ab31 | 517 | printk_once(errata93_warning); |
65ea5b03 | 518 | regs->ip = address; |
1da177e4 LT |
519 | return 1; |
520 | } | |
fdfe8aa8 | 521 | #endif |
1da177e4 | 522 | return 0; |
33cb5243 | 523 | } |
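
The repair in is_errata93() is a single OR that restores the upper 32 bits of the truncated RIP, after which the result must land back in the kernel text or module range to be trusted. A standalone sketch of just that arithmetic (the sample address is hypothetical; assumes a 64-bit build where unsigned long is 64 bits):

```c
/* Standalone sketch: repairing a RIP whose upper 32 bits were cleared. */
#include <stdio.h>

int main(void)
{
	unsigned long address = 0x00000000a01234f0UL;	/* hypothetical truncated RIP */

	address |= 0xffffffffUL << 32;			/* same fixup as is_errata93() */
	printf("repaired RIP: %#lx\n", address);	/* 0xffffffffa01234f0 */
	return 0;
}
```
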
1da177e4 | 524 | |
35f3266f | 525 | /* |
2d4a7167 IM |
526 | * Work around K8 erratum #100: K8 in compat mode occasionally jumps
527 | * to illegal addresses >4GB. | |
528 | * | |
529 | * We catch this in the page fault handler because these addresses | |
530 | * are not reachable. Just detect this case and return. Any code | |
35f3266f HH |
531 | * segment in LDT is compatibility mode. |
532 | */ | |
533 | static int is_errata100(struct pt_regs *regs, unsigned long address) | |
534 | { | |
535 | #ifdef CONFIG_X86_64 | |
2d4a7167 | 536 | if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32)) |
35f3266f HH |
537 | return 1; |
538 | #endif | |
539 | return 0; | |
540 | } | |
541 | ||
29caf2f9 HH |
542 | static int is_f00f_bug(struct pt_regs *regs, unsigned long address) |
543 | { | |
544 | #ifdef CONFIG_X86_F00F_BUG | |
545 | unsigned long nr; | |
2d4a7167 | 546 | |
29caf2f9 | 547 | /* |
2d4a7167 | 548 | * Pentium F0 0F C7 C8 bug workaround: |
29caf2f9 HH |
549 | */ |
550 | if (boot_cpu_data.f00f_bug) { | |
551 | nr = (address - idt_descr.address) >> 3; | |
552 | ||
553 | if (nr == 6) { | |
554 | do_invalid_op(regs, 0); | |
555 | return 1; | |
556 | } | |
557 | } | |
558 | #endif | |
559 | return 0; | |
560 | } | |
561 | ||
8f766149 IM |
562 | static const char nx_warning[] = KERN_CRIT |
563 | "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n"; | |
564 | ||
2d4a7167 IM |
565 | static void |
566 | show_fault_oops(struct pt_regs *regs, unsigned long error_code, | |
567 | unsigned long address) | |
b3279c7f | 568 | { |
1156e098 HH |
569 | if (!oops_may_print()) |
570 | return; | |
571 | ||
1156e098 | 572 | if (error_code & PF_INSTR) { |
93809be8 | 573 | unsigned int level; |
2d4a7167 | 574 | |
1156e098 HH |
575 | pte_t *pte = lookup_address(address, &level); |
576 | ||
8f766149 IM |
577 | if (pte && pte_present(*pte) && !pte_exec(*pte)) |
578 | printk(nx_warning, current_uid()); | |
1156e098 | 579 | } |
1156e098 | 580 | |
19f0dda9 | 581 | printk(KERN_ALERT "BUG: unable to handle kernel "); |
b3279c7f | 582 | if (address < PAGE_SIZE) |
19f0dda9 | 583 | printk(KERN_CONT "NULL pointer dereference"); |
b3279c7f | 584 | else |
19f0dda9 | 585 | printk(KERN_CONT "paging request"); |
2d4a7167 | 586 | |
f294a8ce | 587 | printk(KERN_CONT " at %p\n", (void *) address); |
19f0dda9 | 588 | printk(KERN_ALERT "IP:"); |
b3279c7f | 589 | printk_address(regs->ip, 1); |
2d4a7167 | 590 | |
b3279c7f HH |
591 | dump_pagetable(address); |
592 | } | |
593 | ||
2d4a7167 IM |
594 | static noinline void |
595 | pgtable_bad(struct pt_regs *regs, unsigned long error_code, | |
596 | unsigned long address) | |
1da177e4 | 597 | { |
2d4a7167 IM |
598 | struct task_struct *tsk; |
599 | unsigned long flags; | |
600 | int sig; | |
601 | ||
602 | flags = oops_begin(); | |
603 | tsk = current; | |
604 | sig = SIGKILL; | |
1209140c | 605 | |
1da177e4 | 606 | printk(KERN_ALERT "%s: Corrupted page table at address %lx\n", |
92181f19 | 607 | tsk->comm, address); |
1da177e4 | 608 | dump_pagetable(address); |
2d4a7167 IM |
609 | |
610 | tsk->thread.cr2 = address; | |
611 | tsk->thread.trap_no = 14; | |
612 | tsk->thread.error_code = error_code; | |
613 | ||
22f5991c | 614 | if (__die("Bad pagetable", regs, error_code)) |
874d93d1 | 615 | sig = 0; |
2d4a7167 | 616 | |
874d93d1 | 617 | oops_end(flags, regs, sig); |
1da177e4 LT |
618 | } |
619 | ||
2d4a7167 IM |
620 | static noinline void |
621 | no_context(struct pt_regs *regs, unsigned long error_code, | |
622 | unsigned long address) | |
92181f19 NP |
623 | { |
624 | struct task_struct *tsk = current; | |
19803078 | 625 | unsigned long *stackend; |
92181f19 NP |
626 | unsigned long flags; |
627 | int sig; | |
92181f19 | 628 | |
2d4a7167 | 629 | /* Are we prepared to handle this kernel fault? */ |
92181f19 NP |
630 | if (fixup_exception(regs)) |
631 | return; | |
632 | ||
633 | /* | |
2d4a7167 IM |
634 | * 32-bit: |
635 | * | |
636 | * Valid to do another page fault here, because if this fault | |
637 | * had been triggered by is_prefetch fixup_exception would have | |
638 | * handled it. | |
639 | * | |
640 | * 64-bit: | |
92181f19 | 641 | * |
2d4a7167 | 642 | * Hall of shame of CPU/BIOS bugs. |
92181f19 NP |
643 | */ |
644 | if (is_prefetch(regs, error_code, address)) | |
645 | return; | |
646 | ||
647 | if (is_errata93(regs, address)) | |
648 | return; | |
649 | ||
650 | /* | |
651 | * Oops. The kernel tried to access some bad page. We'll have to | |
2d4a7167 | 652 | * terminate things with extreme prejudice: |
92181f19 | 653 | */ |
92181f19 | 654 | flags = oops_begin(); |
92181f19 NP |
655 | |
656 | show_fault_oops(regs, error_code, address); | |
657 | ||
2d4a7167 | 658 | stackend = end_of_stack(tsk); |
0e7810be | 659 | if (tsk != &init_task && *stackend != STACK_END_MAGIC) |
19803078 IM |
660 | printk(KERN_ALERT "Thread overran stack, or stack corrupted\n"); |
661 | ||
1cc99544 IM |
662 | tsk->thread.cr2 = address; |
663 | tsk->thread.trap_no = 14; | |
664 | tsk->thread.error_code = error_code; | |
92181f19 | 665 | |
92181f19 NP |
666 | sig = SIGKILL; |
667 | if (__die("Oops", regs, error_code)) | |
668 | sig = 0; | |
2d4a7167 | 669 | |
92181f19 NP |
670 | /* Executive summary in case the body of the oops scrolled away */ |
671 | printk(KERN_EMERG "CR2: %016lx\n", address); | |
2d4a7167 | 672 | |
92181f19 | 673 | oops_end(flags, regs, sig); |
92181f19 NP |
674 | } |
675 | ||
2d4a7167 IM |
676 | /* |
677 | * Print out info about fatal segfaults, if the show_unhandled_signals | |
678 | * sysctl is set: | |
679 | */ | |
680 | static inline void | |
681 | show_signal_msg(struct pt_regs *regs, unsigned long error_code, | |
682 | unsigned long address, struct task_struct *tsk) | |
683 | { | |
684 | if (!unhandled_signal(tsk, SIGSEGV)) | |
685 | return; | |
686 | ||
687 | if (!printk_ratelimit()) | |
688 | return; | |
689 | ||
a1a08d1c | 690 | printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx", |
2d4a7167 IM |
691 | task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG, |
692 | tsk->comm, task_pid_nr(tsk), address, | |
693 | (void *)regs->ip, (void *)regs->sp, error_code); | |
694 | ||
695 | print_vma_addr(KERN_CONT " in ", regs->ip); | |
696 | ||
697 | printk(KERN_CONT "\n"); | |
698 | } | |
699 | ||
700 | static void | |
701 | __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, | |
702 | unsigned long address, int si_code) | |
92181f19 NP |
703 | { |
704 | struct task_struct *tsk = current; | |
705 | ||
706 | /* User mode accesses just cause a SIGSEGV */ | |
707 | if (error_code & PF_USER) { | |
708 | /* | |
2d4a7167 | 709 | * It's possible to have interrupts off here: |
92181f19 NP |
710 | */ |
711 | local_irq_enable(); | |
712 | ||
713 | /* | |
714 | * Valid to do another page fault here because this one came | |
2d4a7167 | 715 | * from user space: |
92181f19 NP |
716 | */ |
717 | if (is_prefetch(regs, error_code, address)) | |
718 | return; | |
719 | ||
720 | if (is_errata100(regs, address)) | |
721 | return; | |
722 | ||
2d4a7167 IM |
723 | if (unlikely(show_unhandled_signals)) |
724 | show_signal_msg(regs, error_code, address, tsk); | |
725 | ||
726 | /* Kernel addresses are always protection faults: */ | |
727 | tsk->thread.cr2 = address; | |
728 | tsk->thread.error_code = error_code | (address >= TASK_SIZE); | |
729 | tsk->thread.trap_no = 14; | |
92181f19 | 730 | |
f672b49b | 731 | force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0); |
2d4a7167 | 732 | |
92181f19 NP |
733 | return; |
734 | } | |
735 | ||
736 | if (is_f00f_bug(regs, address)) | |
737 | return; | |
738 | ||
739 | no_context(regs, error_code, address); | |
740 | } | |
741 | ||
2d4a7167 IM |
742 | static noinline void |
743 | bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, | |
744 | unsigned long address) | |
92181f19 NP |
745 | { |
746 | __bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR); | |
747 | } | |
748 | ||
2d4a7167 IM |
749 | static void |
750 | __bad_area(struct pt_regs *regs, unsigned long error_code, | |
751 | unsigned long address, int si_code) | |
92181f19 NP |
752 | { |
753 | struct mm_struct *mm = current->mm; | |
754 | ||
755 | /* | |
756 | * Something tried to access memory that isn't in our memory map.. | |
757 | * Fix it, but check if it's kernel or user first.. | |
758 | */ | |
759 | up_read(&mm->mmap_sem); | |
760 | ||
761 | __bad_area_nosemaphore(regs, error_code, address, si_code); | |
762 | } | |
763 | ||
2d4a7167 IM |
764 | static noinline void |
765 | bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address) | |
92181f19 NP |
766 | { |
767 | __bad_area(regs, error_code, address, SEGV_MAPERR); | |
768 | } | |
769 | ||
2d4a7167 IM |
770 | static noinline void |
771 | bad_area_access_error(struct pt_regs *regs, unsigned long error_code, | |
772 | unsigned long address) | |
92181f19 NP |
773 | { |
774 | __bad_area(regs, error_code, address, SEGV_ACCERR); | |
775 | } | |
776 | ||
777 | /* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */ | |
2d4a7167 IM |
778 | static void |
779 | out_of_memory(struct pt_regs *regs, unsigned long error_code, | |
780 | unsigned long address) | |
92181f19 NP |
781 | { |
782 | /* | |
783 | * We ran out of memory, call the OOM killer, and return to userspace | |
2d4a7167 | 784 | * (which will retry the fault, or kill us if we got oom-killed): |
92181f19 NP |
785 | */ |
786 | up_read(¤t->mm->mmap_sem); | |
2d4a7167 | 787 | |
92181f19 NP |
788 | pagefault_out_of_memory(); |
789 | } | |
790 | ||
2d4a7167 | 791 | static void |
a6e04aa9 AK |
792 | do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, |
793 | unsigned int fault) | |
92181f19 NP |
794 | { |
795 | struct task_struct *tsk = current; | |
796 | struct mm_struct *mm = tsk->mm; | |
a6e04aa9 | 797 | int code = BUS_ADRERR; |
92181f19 NP |
798 | |
799 | up_read(&mm->mmap_sem); | |
800 | ||
2d4a7167 | 801 | /* Kernel mode? Handle exceptions or die: */ |
96054569 | 802 | if (!(error_code & PF_USER)) { |
92181f19 | 803 | no_context(regs, error_code, address); |
96054569 LT |
804 | return; |
805 | } | |
2d4a7167 | 806 | |
cd1b68f0 | 807 | /* User-space => ok to do another page fault: */ |
92181f19 NP |
808 | if (is_prefetch(regs, error_code, address)) |
809 | return; | |
2d4a7167 IM |
810 | |
811 | tsk->thread.cr2 = address; | |
812 | tsk->thread.error_code = error_code; | |
813 | tsk->thread.trap_no = 14; | |
814 | ||
a6e04aa9 | 815 | #ifdef CONFIG_MEMORY_FAILURE |
f672b49b | 816 | if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { |
a6e04aa9 AK |
817 | printk(KERN_ERR |
818 | "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n", | |
819 | tsk->comm, tsk->pid, address); | |
820 | code = BUS_MCEERR_AR; | |
821 | } | |
822 | #endif | |
f672b49b | 823 | force_sig_info_fault(SIGBUS, code, address, tsk, fault); |
92181f19 NP |
824 | } |
825 | ||
2d4a7167 IM |
826 | static noinline void |
827 | mm_fault_error(struct pt_regs *regs, unsigned long error_code, | |
828 | unsigned long address, unsigned int fault) | |
92181f19 | 829 | { |
2d4a7167 | 830 | if (fault & VM_FAULT_OOM) { |
f8626854 AV |
831 | /* Kernel mode? Handle exceptions or die: */ |
832 | if (!(error_code & PF_USER)) { | |
833 | up_read(¤t->mm->mmap_sem); | |
834 | no_context(regs, error_code, address); | |
835 | return; | |
836 | } | |
837 | ||
92181f19 | 838 | out_of_memory(regs, error_code, address); |
2d4a7167 | 839 | } else { |
f672b49b AK |
840 | if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| |
841 | VM_FAULT_HWPOISON_LARGE)) | |
a6e04aa9 | 842 | do_sigbus(regs, error_code, address, fault); |
2d4a7167 IM |
843 | else |
844 | BUG(); | |
845 | } | |
92181f19 NP |
846 | } |
847 | ||
d8b57bb7 TG |
848 | static int spurious_fault_check(unsigned long error_code, pte_t *pte) |
849 | { | |
850 | if ((error_code & PF_WRITE) && !pte_write(*pte)) | |
851 | return 0; | |
2d4a7167 | 852 | |
d8b57bb7 TG |
853 | if ((error_code & PF_INSTR) && !pte_exec(*pte)) |
854 | return 0; | |
855 | ||
856 | return 1; | |
857 | } | |
858 | ||
5b727a3b | 859 | /* |
2d4a7167 IM |
860 | * Handle a spurious fault caused by a stale TLB entry. |
861 | * | |
862 | * This allows us to lazily refresh the TLB when increasing the | |
863 | * permissions of a kernel page (RO -> RW or NX -> X). Doing it | |
864 | * eagerly is very expensive since that implies doing a full | |
865 | * cross-processor TLB flush, even if no stale TLB entries exist | |
866 | * on other processors. | |
867 | * | |
5b727a3b JF |
868 | * There are no security implications to leaving a stale TLB when |
869 | * increasing the permissions on a page. | |
870 | */ | |
62c9295f | 871 | static noinline __kprobes int |
2d4a7167 | 872 | spurious_fault(unsigned long error_code, unsigned long address) |
5b727a3b JF |
873 | { |
874 | pgd_t *pgd; | |
875 | pud_t *pud; | |
876 | pmd_t *pmd; | |
877 | pte_t *pte; | |
3c3e5694 | 878 | int ret; |
5b727a3b JF |
879 | |
880 | /* Reserved-bit violation or user access to kernel space? */ | |
881 | if (error_code & (PF_USER | PF_RSVD)) | |
882 | return 0; | |
883 | ||
884 | pgd = init_mm.pgd + pgd_index(address); | |
885 | if (!pgd_present(*pgd)) | |
886 | return 0; | |
887 | ||
888 | pud = pud_offset(pgd, address); | |
889 | if (!pud_present(*pud)) | |
890 | return 0; | |
891 | ||
d8b57bb7 TG |
892 | if (pud_large(*pud)) |
893 | return spurious_fault_check(error_code, (pte_t *) pud); | |
894 | ||
5b727a3b JF |
895 | pmd = pmd_offset(pud, address); |
896 | if (!pmd_present(*pmd)) | |
897 | return 0; | |
898 | ||
d8b57bb7 TG |
899 | if (pmd_large(*pmd)) |
900 | return spurious_fault_check(error_code, (pte_t *) pmd); | |
901 | ||
660a293e SL |
902 | /* |
903 | * Note: don't use pte_present() here, since it returns true | |
904 | * if the _PAGE_PROTNONE bit is set. However, this aliases the | |
905 | * _PAGE_GLOBAL bit, which for kernel pages give false positives | |
906 | * when CONFIG_DEBUG_PAGEALLOC is used. | |
907 | */ | |
5b727a3b | 908 | pte = pte_offset_kernel(pmd, address); |
660a293e | 909 | if (!(pte_flags(*pte) & _PAGE_PRESENT)) |
5b727a3b JF |
910 | return 0; |
911 | ||
3c3e5694 SR |
912 | ret = spurious_fault_check(error_code, pte); |
913 | if (!ret) | |
914 | return 0; | |
915 | ||
916 | /* | |
2d4a7167 IM |
917 | * Make sure we have permissions in PMD. |
918 | * If not, then there's a bug in the page tables: | |
3c3e5694 SR |
919 | */ |
920 | ret = spurious_fault_check(error_code, (pte_t *) pmd); | |
921 | WARN_ONCE(!ret, "PMD has incorrect permission bits\n"); | |
2d4a7167 | 922 | |
3c3e5694 | 923 | return ret; |
5b727a3b JF |
924 | } |
925 | ||
abd4f750 | 926 | int show_unhandled_signals = 1; |
1da177e4 | 927 | |
2d4a7167 | 928 | static inline int |
68da336a | 929 | access_error(unsigned long error_code, struct vm_area_struct *vma) |
92181f19 | 930 | { |
68da336a | 931 | if (error_code & PF_WRITE) { |
2d4a7167 | 932 | /* write, present and write, not present: */ |
92181f19 NP |
933 | if (unlikely(!(vma->vm_flags & VM_WRITE))) |
934 | return 1; | |
2d4a7167 | 935 | return 0; |
92181f19 NP |
936 | } |
937 | ||
2d4a7167 IM |
938 | /* read, present: */ |
939 | if (unlikely(error_code & PF_PROT)) | |
940 | return 1; | |
941 | ||
942 | /* read, not present: */ | |
943 | if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))) | |
944 | return 1; | |
945 | ||
92181f19 NP |
946 | return 0; |
947 | } | |
948 | ||
0973a06c HS |
949 | static int fault_in_kernel_space(unsigned long address) |
950 | { | |
d9517346 | 951 | return address >= TASK_SIZE_MAX; |
0973a06c HS |
952 | } |
953 | ||
1da177e4 LT |
954 | /* |
955 | * This routine handles page faults. It determines the address, | |
956 | * and the problem, and then passes it off to one of the appropriate | |
957 | * routines. | |
1da177e4 | 958 | */ |
c3731c68 IM |
959 | dotraplinkage void __kprobes |
960 | do_page_fault(struct pt_regs *regs, unsigned long error_code) | |
1da177e4 | 961 | { |
2d4a7167 | 962 | struct vm_area_struct *vma; |
1da177e4 | 963 | struct task_struct *tsk; |
2d4a7167 | 964 | unsigned long address; |
1da177e4 | 965 | struct mm_struct *mm; |
f8c2ee22 | 966 | int fault; |
d065bd81 ML |
967 | int write = error_code & PF_WRITE; |
968 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | | |
969 | (write ? FAULT_FLAG_WRITE : 0); | |
1da177e4 | 970 | |
a9ba9a3b AV |
971 | tsk = current; |
972 | mm = tsk->mm; | |
2d4a7167 | 973 | |
2d4a7167 | 974 | /* Get the faulting address: */ |
f51c9452 | 975 | address = read_cr2(); |
1da177e4 | 976 | |
f8561296 VN |
977 | /* |
978 | * Detect and handle instructions that would cause a page fault for | |
979 | * both a tracked kernel page and a userspace page. | |
980 | */ | |
981 | if (kmemcheck_active(regs)) | |
982 | kmemcheck_hide(regs); | |
5dfaf90f | 983 | prefetchw(&mm->mmap_sem); |
f8561296 | 984 | |
0fd0e3da | 985 | if (unlikely(kmmio_fault(regs, address))) |
86069782 | 986 | return; |
1da177e4 LT |
987 | |
988 | /* | |
989 | * We fault-in kernel-space virtual memory on-demand. The | |
990 | * 'reference' page table is init_mm.pgd. | |
991 | * | |
992 | * NOTE! We MUST NOT take any locks for this case. We may | |
993 | * be in an interrupt or a critical region, and should | |
994 | * only copy the information from the master page table, | |
995 | * nothing more. | |
996 | * | |
997 | * This verifies that the fault happens in kernel space | |
998 | * (error_code & 4) == 0, and that the fault was not a | |
8b1bde93 | 999 | * protection error (error_code & 9) == 0. |
1da177e4 | 1000 | */ |
0973a06c | 1001 | if (unlikely(fault_in_kernel_space(address))) { |
f8561296 VN |
1002 | if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) { |
1003 | if (vmalloc_fault(address) >= 0) | |
1004 | return; | |
1005 | ||
1006 | if (kmemcheck_fault(regs, address, error_code)) | |
1007 | return; | |
1008 | } | |
5b727a3b | 1009 | |
2d4a7167 | 1010 | /* Can handle a stale RO->RW TLB: */ |
92181f19 | 1011 | if (spurious_fault(error_code, address)) |
5b727a3b JF |
1012 | return; |
1013 | ||
2d4a7167 | 1014 | /* kprobes don't want to hook the spurious faults: */ |
9be260a6 MH |
1015 | if (notify_page_fault(regs)) |
1016 | return; | |
f8c2ee22 HH |
1017 | /* |
1018 | * Don't take the mm semaphore here. If we fixup a prefetch | |
2d4a7167 | 1019 | * fault we could otherwise deadlock: |
f8c2ee22 | 1020 | */ |
92181f19 | 1021 | bad_area_nosemaphore(regs, error_code, address); |
2d4a7167 | 1022 | |
92181f19 | 1023 | return; |
f8c2ee22 HH |
1024 | } |
1025 | ||
2d4a7167 | 1026 | /* kprobes don't want to hook the spurious faults: */ |
f8a6b2b9 | 1027 | if (unlikely(notify_page_fault(regs))) |
9be260a6 | 1028 | return; |
f8c2ee22 | 1029 | /* |
891cffbd LT |
1030 | * It's safe to allow irq's after cr2 has been saved and the |
1031 | * vmalloc fault has been handled. | |
1032 | * | |
1033 | * User-mode registers count as a user access even for any | |
2d4a7167 | 1034 | * potential system fault or CPU buglet: |
f8c2ee22 | 1035 | */ |
891cffbd LT |
1036 | if (user_mode_vm(regs)) { |
1037 | local_irq_enable(); | |
1038 | error_code |= PF_USER; | |
2d4a7167 IM |
1039 | } else { |
1040 | if (regs->flags & X86_EFLAGS_IF) | |
1041 | local_irq_enable(); | |
1042 | } | |
8c914cb7 | 1043 | |
66c58156 | 1044 | if (unlikely(error_code & PF_RSVD)) |
92181f19 | 1045 | pgtable_bad(regs, error_code, address); |
1da177e4 | 1046 | |
cdd6c482 | 1047 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); |
7dd1fcc2 | 1048 | |
1da177e4 | 1049 | /* |
2d4a7167 IM |
1050 | * If we're in an interrupt, have no user context or are running |
1051 | * in an atomic region then we must not take the fault: | |
1da177e4 | 1052 | */ |
92181f19 NP |
1053 | if (unlikely(in_atomic() || !mm)) { |
1054 | bad_area_nosemaphore(regs, error_code, address); | |
1055 | return; | |
1056 | } | |
1da177e4 | 1057 | |
3a1dfe6e IM |
1058 | /* |
1059 | * When running in the kernel we expect faults to occur only to | |
2d4a7167 IM |
1060 | * addresses in user space. All other faults represent errors in |
1061 | * the kernel and should generate an OOPS. Unfortunately, in the | |
1062 | * case of an erroneous fault occurring in a code path which already | |
1063 | * holds mmap_sem we will deadlock attempting to validate the fault | |
1064 | * against the address space. Luckily the kernel only validly | |
1065 | * references user space from well defined areas of code, which are | |
1066 | * listed in the exceptions table. | |
1da177e4 LT |
1067 | * |
1068 | * As the vast majority of faults will be valid we will only perform | |
2d4a7167 IM |
1069 | * the source reference check when there is a possibility of a |
1070 | * deadlock. Attempt to lock the address space, if we cannot we then | |
1071 | * validate the source. If this is invalid we can skip the address | |
1072 | * space check, thus avoiding the deadlock: | |
1da177e4 | 1073 | */ |
92181f19 | 1074 | if (unlikely(!down_read_trylock(&mm->mmap_sem))) { |
66c58156 | 1075 | if ((error_code & PF_USER) == 0 && |
92181f19 NP |
1076 | !search_exception_tables(regs->ip)) { |
1077 | bad_area_nosemaphore(regs, error_code, address); | |
1078 | return; | |
1079 | } | |
d065bd81 | 1080 | retry: |
1da177e4 | 1081 | down_read(&mm->mmap_sem); |
01006074 PZ |
1082 | } else { |
1083 | /* | |
2d4a7167 IM |
1084 | * The above down_read_trylock() might have succeeded in |
1085 | * which case we'll have missed the might_sleep() from | |
1086 | * down_read(): | |
01006074 PZ |
1087 | */ |
1088 | might_sleep(); | |
1da177e4 LT |
1089 | } |
1090 | ||
1091 | vma = find_vma(mm, address); | |
92181f19 NP |
1092 | if (unlikely(!vma)) { |
1093 | bad_area(regs, error_code, address); | |
1094 | return; | |
1095 | } | |
1096 | if (likely(vma->vm_start <= address)) | |
1da177e4 | 1097 | goto good_area; |
92181f19 NP |
1098 | if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) { |
1099 | bad_area(regs, error_code, address); | |
1100 | return; | |
1101 | } | |
33cb5243 | 1102 | if (error_code & PF_USER) { |
6f4d368e HH |
1103 | /* |
1104 | * Accessing the stack below %sp is always a bug. | |
1105 | * The large cushion allows instructions like enter | |
2d4a7167 | 1106 | * and pusha to work. ("enter $65535, $31" pushes |
6f4d368e | 1107 | * 32 pointers and then decrements %sp by 65535.) |
03fdc2c2 | 1108 | */ |
92181f19 NP |
1109 | if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) { |
1110 | bad_area(regs, error_code, address); | |
1111 | return; | |
1112 | } | |
1da177e4 | 1113 | } |
92181f19 NP |
1114 | if (unlikely(expand_stack(vma, address))) { |
1115 | bad_area(regs, error_code, address); | |
1116 | return; | |
1117 | } | |
1118 | ||
1119 | /* | |
1120 | * Ok, we have a good vm_area for this memory access, so | |
1121 | * we can handle it.. | |
1122 | */ | |
1da177e4 | 1123 | good_area: |
68da336a | 1124 | if (unlikely(access_error(error_code, vma))) { |
92181f19 NP |
1125 | bad_area_access_error(regs, error_code, address); |
1126 | return; | |
1da177e4 LT |
1127 | } |
1128 | ||
1129 | /* | |
1130 | * If for any reason at all we couldn't handle the fault, | |
1131 | * make sure we exit gracefully rather than endlessly redo | |
2d4a7167 | 1132 | * the fault: |
1da177e4 | 1133 | */ |
d065bd81 | 1134 | fault = handle_mm_fault(mm, vma, address, flags); |
2d4a7167 | 1135 | |
83c54070 | 1136 | if (unlikely(fault & VM_FAULT_ERROR)) { |
92181f19 NP |
1137 | mm_fault_error(regs, error_code, address, fault); |
1138 | return; | |
1da177e4 | 1139 | } |
2d4a7167 | 1140 | |
d065bd81 ML |
1141 | /* |
1142 | * Major/minor page fault accounting is only done on the | |
1143 | * initial attempt. If we go through a retry, it is extremely | |
1144 | * likely that the page will be found in page cache at that point. | |
1145 | */ | |
1146 | if (flags & FAULT_FLAG_ALLOW_RETRY) { | |
1147 | if (fault & VM_FAULT_MAJOR) { | |
1148 | tsk->maj_flt++; | |
1149 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, | |
1150 | regs, address); | |
1151 | } else { | |
1152 | tsk->min_flt++; | |
1153 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | |
1154 | regs, address); | |
1155 | } | |
1156 | if (fault & VM_FAULT_RETRY) { | |
1157 | /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk | |
1158 | * of starvation. */ | |
1159 | flags &= ~FAULT_FLAG_ALLOW_RETRY; | |
1160 | goto retry; | |
1161 | } | |
ac17dc8e | 1162 | } |
d729ab35 | 1163 | |
8c938f9f IM |
1164 | check_v8086_mode(regs, address, tsk); |
1165 | ||
1da177e4 | 1166 | up_read(&mm->mmap_sem); |
1da177e4 | 1167 | } |