arch/x86/mm/pgtable.c
#include <linux/mm.h>
#include <linux/gfp.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/mtrr.h>

#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | __GFP_ZERO)

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

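/*
 * Kernel page tables are never allocated from highmem and, unlike user
 * page tables, are not charged to a memory cgroup, hence the cleared
 * __GFP_ACCOUNT below.
 */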
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(PGALLOC_GFP & ~__GFP_ACCOUNT);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(__userpte_alloc_gfp, 0);
	if (!pte)
		return NULL;
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}

static int __init setup_userpte(char *arg)
{
	if (!arg)
		return -EINVAL;

	/*
	 * "userpte=nohigh" disables allocation of user pagetables in
	 * high memory.
	 */
	if (strcmp(arg, "nohigh") == 0)
		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
	else
		return -EINVAL;
	return 0;
}
early_param("userpte", setup_userpte);

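/*
 * Called through the mmu_gather machinery when a user PTE page is torn
 * down: undo the pgtable_page_ctor() accounting, let paravirt know the
 * pfn no longer holds a page table, and defer freeing the page until
 * after the TLB has been flushed.
 */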
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

#if CONFIG_PGTABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	struct page *page = virt_to_page(pmd);
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	/*
	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
	 * entries need a full cr3 reload to flush.
	 */
#ifdef CONFIG_X86_PAE
	tlb->need_flush_all = 1;
#endif
	pgtable_pmd_page_dtor(page);
	tlb_remove_page(tlb, page);
}

#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

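/*
 * pgds that do not share the kernel pmd are linked on pgd_list (protected
 * by pgd_lock) so that changes to the kernel portion of the page tables
 * can be propagated to all of them; see the pageattr.c comment below.
 */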
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)


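/*
 * The mm that owns a pgd page is stashed in the page's otherwise unused
 * ->index field, so pgd_page_get_mm() can walk back from an entry on
 * pgd_list to the mm_struct it belongs to.
 */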
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
	virt_to_page(pgd)->index = (pgoff_t)mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
	return (struct mm_struct *)page->index;
}

static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (CONFIG_PGTABLE_LEVELS == 2 ||
	    (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    CONFIG_PGTABLE_LEVELS == 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD) {
		pgd_set_mm(pgd, mm);
		pgd_list_add(pgd);
	}
}

static void pgd_dtor(pgd_t *pgd)
{
	if (SHARED_KERNEL_PMD)
		return;

	spin_lock(&pgd_lock);
	pgd_list_del(pgd);
	spin_unlock(&pgd_lock);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- nyc
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */

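/*
 * Undo preallocate_pmds() on an error path: free whichever of the
 * preallocated pmd pages were actually allocated but never installed.
 */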
static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
	int i;

	for(i = 0; i < PREALLOCATED_PMDS; i++)
		if (pmds[i]) {
			pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
			free_page((unsigned long)pmds[i]);
			mm_dec_nr_pmds(mm);
		}
}

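/*
 * Allocate the PREALLOCATED_PMDS pmd pages needed for a new pgd.  This is
 * all-or-nothing: if any allocation fails, every page allocated so far is
 * released and -ENOMEM is returned.
 */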
static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
	int i;
	bool failed = false;
	gfp_t gfp = PGALLOC_GFP;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;

	for(i = 0; i < PREALLOCATED_PMDS; i++) {
		pmd_t *pmd = (pmd_t *)__get_free_page(gfp);
		if (!pmd)
			failed = true;
		if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
			free_page((unsigned long)pmd);
			pmd = NULL;
			failed = true;
		}
		if (pmd)
			mm_inc_nr_pmds(mm);
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(mm, pmds);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for(i = 0; i < PREALLOCATED_PMDS; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
			mm_dec_nr_pmds(mm);
		}
	}
}

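/*
 * Install the preallocated pmds into a fresh pgd.  Entries at or above
 * KERNEL_PGD_BOUNDARY additionally get a copy of the kernel's pmd entries
 * from swapper_pg_dir before being hooked in.
 */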
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	pud_t *pud;
	int i;

	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
		return;

	pud = pud_offset(pgd, 0);

	for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}

/*
 * Xen paravirt assumes that the pgd table occupies a single page, and the
 * 64-bit kernel makes the same assumption.
 *
 * A kernel with PAE paging that is not running as a Xen domain, however,
 * only needs 32 bytes for its pgd instead of one page.
 */
#ifdef CONFIG_X86_PAE

#include <linux/slab.h>

#define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#define PGD_ALIGN	32

static struct kmem_cache *pgd_cache;

static int __init pgd_cache_init(void)
{
	/*
	 * When a PAE kernel is running as a Xen domain, it does not use a
	 * shared kernel pmd, and that requires a whole page for the pgd.
	 */
	if (!SHARED_KERNEL_PMD)
		return 0;

	/*
	 * When a PAE kernel is not running as a Xen domain, it uses a shared
	 * kernel pmd, which does not require a whole page for the pgd: 32
	 * bytes are enough.  Create a 32-byte slab at boot time for pgd
	 * table allocations.
	 */
	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
				      SLAB_PANIC, NULL);
	if (!pgd_cache)
		return -ENOMEM;

	return 0;
}
core_initcall(pgd_cache_init);

static inline pgd_t *_pgd_alloc(void)
{
	/*
	 * If SHARED_KERNEL_PMD is not set, the PAE kernel is running as a
	 * Xen domain and the pgd needs a whole page.
	 */
	if (!SHARED_KERNEL_PMD)
		return (pgd_t *)__get_free_page(PGALLOC_GFP);

	/*
	 * Not running as a Xen domain: a 32-byte slab object is enough for
	 * the pgd and saves memory.
	 */
	return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
}

static inline void _pgd_free(pgd_t *pgd)
{
	if (!SHARED_KERNEL_PMD)
		free_page((unsigned long)pgd);
	else
		kmem_cache_free(pgd_cache, pgd);
}
#else
static inline pgd_t *_pgd_alloc(void)
{
	return (pgd_t *)__get_free_page(PGALLOC_GFP);
}

static inline void _pgd_free(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}
#endif	/* CONFIG_X86_PAE */

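/*
 * Allocate and initialize a new pgd for @mm.  The kernel portion is cloned
 * from swapper_pg_dir and, on PAE, the pmds are pre-populated; everything
 * visible to pgd_list walkers is done under pgd_lock so they never see a
 * partially initialized pgd.
 */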
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *pmds[PREALLOCATED_PMDS];

	pgd = _pgd_alloc();

	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(mm, pmds) != 0)
		goto out_free_pgd;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock(&pgd_lock);

	pgd_ctor(mm, pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);

	spin_unlock(&pgd_lock);

	return pgd;

out_free_pmds:
	free_pmds(mm, pmds);
out_free_pgd:
	_pgd_free(pgd);
out:
	return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	_pgd_free(pgd);
}

/*
 * Used to set accessed or dirty bits in the page table entries
 * on other architectures. On x86, the accessed and dirty bits
 * are tracked by hardware. However, do_wp_page calls this function
 * to also make the pte writeable at the same time the dirty bit is
 * set. In that case we do actually need to write the PTE.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update(vma->vm_mm, address, ptep);
	}

	return changed;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (changed && dirty) {
		*pmdp = entry;
		/*
		 * We had a write-protection fault here and changed the pmd
		 * to be more permissive.  No need to flush the TLB for that,
		 * #PF is architecturally guaranteed to do that and in the
		 * worst-case we'll generate a spurious fault.
		 */
	}

	return changed;
}
#endif

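/*
 * Atomically clear the accessed bit and report whether it was set.  The
 * pte_young() check avoids the atomic op (and the paravirt pte_update
 * notification) when the bit is already clear.
 */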
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	if (ret)
		pte_update(vma->vm_mm, addr, ptep);

	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pmd_t *pmdp)
{
	int ret = 0;

	if (pmd_young(*pmdp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pmdp);

	return ret;
}
#endif

int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	/*
	 * On x86 CPUs, clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	return young;
}
#endif

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
	__FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
	printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n",
	       -reserve, __FIXADDR_TOP + PAGE_SIZE);
#endif
}

int fixmaps_set;

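/*
 * Install @pte at fixmap slot @idx.  Fixmap slots are set up early, one at
 * a time, and fixmaps_set is bumped so that reserve_top_address() can
 * refuse to move __FIXADDR_TOP once any fixmap is in place.
 */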
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
		       pgprot_t flags)
{
	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
/**
 * pud_set_huge - setup kernel PUD mapping
 *
 * MTRRs can override PAT memory types with 4KiB granularity. Therefore, this
 * function sets up a huge page only if any of the following conditions are met:
 *
 * - MTRRs are disabled, or
 *
 * - MTRRs are enabled and the range is completely covered by a single MTRR, or
 *
 * - MTRRs are enabled and the corresponding MTRR memory type is WB, which
 *   has no effect on the requested PAT memory type.
 *
 * Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger
 * page mapping attempt fails.
 *
 * Returns 1 on success and 0 on failure.
 */
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	u8 mtrr, uniform;

	mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
	if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
	    (mtrr != MTRR_TYPE_WRBACK))
		return 0;

	prot = pgprot_4k_2_large(prot);

	set_pte((pte_t *)pud, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(pgprot_val(prot) | _PAGE_PSE)));

	return 1;
}

/**
 * pmd_set_huge - setup kernel PMD mapping
 *
 * See text over pud_set_huge() above.
 *
 * Returns 1 on success and 0 on failure.
 */
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	u8 mtrr, uniform;

	mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
	if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
	    (mtrr != MTRR_TYPE_WRBACK)) {
		pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n",
			     __func__, addr, addr + PMD_SIZE);
		return 0;
	}

	prot = pgprot_4k_2_large(prot);

	set_pte((pte_t *)pmd, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(pgprot_val(prot) | _PAGE_PSE)));

	return 1;
}

/**
 * pud_clear_huge - clear kernel PUD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PUD map is found).
 */
int pud_clear_huge(pud_t *pud)
{
	if (pud_large(*pud)) {
		pud_clear(pud);
		return 1;
	}

	return 0;
}

/**
 * pmd_clear_huge - clear kernel PMD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PMD map is found).
 */
int pmd_clear_huge(pmd_t *pmd)
{
	if (pmd_large(*pmd)) {
		pmd_clear(pmd);
		return 1;
	}

	return 0;
}
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */