#include <linux/mm.h>
#include <linux/gfp.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

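/*
 * Allocate one page-table page.  Kernel page tables always come from
 * lowmem with PGALLOC_GFP; user page tables use __userpte_alloc_gfp,
 * which may add __GFP_HIGHMEM when CONFIG_HIGHPTE is enabled (see
 * "userpte=nohigh" below).
 */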
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(PGALLOC_GFP);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(__userpte_alloc_gfp, 0);
	if (!pte)
		return NULL;
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}

static int __init setup_userpte(char *arg)
{
	if (!arg)
		return -EINVAL;

	/*
	 * "userpte=nohigh" disables allocation of user pagetables in
	 * high memory.
	 */
	if (strcmp(arg, "nohigh") == 0)
		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
	else
		return -EINVAL;
	return 0;
}
early_param("userpte", setup_userpte);

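/*
 * Free page-table pages via the mmu_gather: the dtor undoes the
 * ctor-time accounting, the hypervisor is told the page is no longer
 * a page table, and the page itself is only freed once the TLB has
 * been flushed for the range being torn down.
 */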
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

#if PAGETABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	struct page *page = virt_to_page(pmd);
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	/*
	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
	 * entries need a full cr3 reload to flush.
	 */
#ifdef CONFIG_X86_PAE
	tlb->need_flush_all = 1;
#endif
	pgtable_pmd_page_dtor(page);
	tlb_remove_page(tlb, page);
}

#if PAGETABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* PAGETABLE_LEVELS > 3 */
#endif	/* PAGETABLE_LEVELS > 2 */

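/*
 * pgd_list links every pgd in the system when the kernel pmd is not
 * shared, so updates to the kernel portion of the page tables can be
 * propagated to all of them.  The list is protected by pgd_lock; see
 * pgd_ctor()/pgd_dtor() and the block comment further down.
 */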
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD	\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)

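/*
 * Remember which mm_struct owns a pgd page by stashing the pointer in
 * the otherwise unused page->index field; pgd_page_get_mm() is the
 * lookup used by code that walks pgd_list and needs to get back to
 * the owning mm.
 */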
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
	virt_to_page(pgd)->index = (pgoff_t)mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
	return (struct mm_struct *)page->index;
}

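/*
 * pgd_ctor()/pgd_dtor() set up and tear down a freshly allocated pgd:
 * copy the kernel mappings from swapper_pg_dir and, when the kernel
 * pmd is not shared, add the pgd to (or remove it from) pgd_list.
 */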
static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    PAGETABLE_LEVELS == 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD) {
		pgd_set_mm(pgd, mm);
		pgd_list_add(pgd);
	}
}

static void pgd_dtor(pgd_t *pgd)
{
	if (SHARED_KERNEL_PMD)
		return;

	spin_lock(&pgd_lock);
	pgd_list_del(pgd);
	spin_unlock(&pgd_lock);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- nyc
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */

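/*
 * free_pmds()/preallocate_pmds() manage the pmd pages that are
 * preallocated for a new pgd (PAE only; PREALLOCATED_PMDS is 0
 * otherwise, so both loops compile away).
 */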
static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
	int i;

	for(i = 0; i < PREALLOCATED_PMDS; i++)
		if (pmds[i]) {
			pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
			free_page((unsigned long)pmds[i]);
			mm_dec_nr_pmds(mm);
		}
}

static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
	int i;
	bool failed = false;

	for(i = 0; i < PREALLOCATED_PMDS; i++) {
		pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
		if (!pmd)
			failed = true;
		if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
			free_page((unsigned long)pmd);
			pmd = NULL;
			failed = true;
		}
		if (pmd)
			mm_inc_nr_pmds(mm);
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(mm, pmds);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for(i = 0; i < PREALLOCATED_PMDS; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
			mm_dec_nr_pmds(mm);
		}
	}
}

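/*
 * Install the preallocated pmds into the new pgd.  Slots at or above
 * KERNEL_PGD_BOUNDARY first get a copy of the kernel's pmd entries
 * from swapper_pg_dir so the new mm sees the kernel mappings.
 */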
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	pud_t *pud;
	int i;

	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
		return;

	pud = pud_offset(pgd, 0);

	for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}

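/*
 * Allocate and initialize a new top-level page table for an mm.  The
 * ctor and pmd prepopulation run under pgd_lock so that anything
 * walking pgd_list never observes a partially initialized pgd.
 */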
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *pmds[PREALLOCATED_PMDS];

	pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);

	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(mm, pmds) != 0)
		goto out_free_pgd;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock(&pgd_lock);

	pgd_ctor(mm, pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);

	spin_unlock(&pgd_lock);

	return pgd;

out_free_pmds:
	free_pmds(mm, pmds);
out_free_pgd:
	free_page((unsigned long)pgd);
out:
	return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	free_page((unsigned long)pgd);
}

/*
 * Used to set accessed or dirty bits in the page table entries
 * on other architectures. On x86, the accessed and dirty bits
 * are tracked by hardware. However, do_wp_page calls this function
 * to also make the pte writeable at the same time the dirty bit is
 * set. In that case we do actually need to write the PTE.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update_defer(vma->vm_mm, address, ptep);
	}

	return changed;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (changed && dirty) {
		*pmdp = entry;
		pmd_update_defer(vma->vm_mm, address, pmdp);
		/*
		 * We had a write-protection fault here and changed the pmd
		 * to be more permissive. No need to flush the TLB for that,
		 * #PF is architecturally guaranteed to do that and in the
		 * worst-case we'll generate a spurious fault.
		 */
	}

	return changed;
}
#endif

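/*
 * Clear the accessed bit and report whether it was set.  The bit is
 * cleared atomically since hardware may set it again at any time; no
 * TLB flush is done here, see ptep_clear_flush_young() below for why
 * that is safe on x86.
 */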
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	if (ret)
		pte_update(vma->vm_mm, addr, ptep);

	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pmd_t *pmdp)
{
	int ret = 0;

	if (pmd_young(*pmdp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pmdp);

	if (ret)
		pmd_update(vma->vm_mm, addr, pmdp);

	return ret;
}
#endif

int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	/*
	 * On x86 CPUs, clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	return young;
}

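/*
 * Mark a huge pmd as splitting.  The bit is set atomically; only the
 * caller that actually set it does the TLB flush needed to serialize
 * against concurrent gup-fast walkers.
 */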
void pmdp_splitting_flush(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp)
{
	int set;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	set = !test_and_set_bit(_PAGE_BIT_SPLITTING,
				(unsigned long *)pmdp);
	if (set) {
		pmd_update(vma->vm_mm, address, pmdp);
		/* need tlb flush only to serialize against gup-fast */
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
}
#endif

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
	__FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
	printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n",
	       -reserve, __FIXADDR_TOP + PAGE_SIZE);
#endif
}

int fixmaps_set;

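/*
 * Install a fixmap entry: __native_set_fixmap() takes a ready-made pte,
 * native_set_fixmap() builds one from a physical address and protection
 * flags.  fixmaps_set counts installed entries so reserve_top_address()
 * can refuse to move the fixmap area after any have been set.
 */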
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
		       pgprot_t flags)
{
	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}