#include <linux/mm.h>
#include <linux/gfp.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif
gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;
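
/* Kernel pte pages are always zeroed lowmem pages (no __GFP_HIGHMEM). */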
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(PGALLOC_GFP);
}
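
/*
 * User pte pages may come from highmem (see __userpte_alloc_gfp) and must
 * go through pgtable_page_ctor() before being used as a page table.
 */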
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(__userpte_alloc_gfp, 0);
	if (!pte)
		return NULL;
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}
static int __init setup_userpte(char *arg)
{
	if (!arg)
		return -EINVAL;

	/*
	 * "userpte=nohigh" disables allocation of user pagetables in
	 * high memory.
	 */
	if (strcmp(arg, "nohigh") == 0)
		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
	else
		return -EINVAL;
	return 0;
}
early_param("userpte", setup_userpte);
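
/*
 * Release a user pte page: undo the ctor, tell paravirt the pfn is no
 * longer a page table, then defer the free to the mmu_gather.
 */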
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}
#if PAGETABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	/*
	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
	 * entries need a full cr3 reload to flush.
	 */
#ifdef CONFIG_X86_PAE
	tlb->need_flush_all = 1;
#endif
	tlb_remove_page(tlb, virt_to_page(pmd));
}
#if PAGETABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* PAGETABLE_LEVELS > 3 */
#endif	/* PAGETABLE_LEVELS > 2 */
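
/* Helpers to track pgd pages on pgd_list via the struct page's lru field. */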
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}
static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}
#define UNSHARED_PTRS_PER_PGD \
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
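
/*
 * Record which mm owns a pgd page by stashing the mm pointer in the pgd
 * page's index field; pgd_page_get_mm() reads it back.
 */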
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
	virt_to_page(pgd)->index = (pgoff_t)mm;
}
struct mm_struct *pgd_page_get_mm(struct page *page)
{
	return (struct mm_struct *)page->index;
}
static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    PAGETABLE_LEVELS == 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD) {
		pgd_set_mm(pgd, mm);
		pgd_list_add(pgd);
	}
}
static void pgd_dtor(pgd_t *pgd)
{
	if (SHARED_KERNEL_PMD)
		return;

	spin_lock(&pgd_lock);
	pgd_list_del(pgd);
	spin_unlock(&pgd_lock);
}
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 */
#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */
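
/* Free pmd pages that were preallocated for a new pgd. */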
static void free_pmds(pmd_t *pmds[])
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++)
		if (pmds[i]) {
			pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
			free_page((unsigned long)pmds[i]);
		}
}
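
/*
 * Preallocate one pmd page per pre-populated pgd slot; on any failure,
 * release everything allocated so far and return -ENOMEM.
 */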
static int preallocate_pmds(pmd_t *pmds[])
{
	int i;
	bool failed = false;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);

		if (!pmd)
			failed = true;
		if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
			free_page((unsigned long)pmd);
			pmd = NULL;
			failed = true;
		}
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(pmds);
		return -ENOMEM;
	}

	return 0;
}
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	pud_t *pud;
	int i;

	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
		return;

	pud = pud_offset(pgd, 0);

	for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}
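
/*
 * Allocate a new pgd and set up its kernel mappings; on PAE the pmds are
 * also pre-populated here, all under pgd_lock.
 */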
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *pmds[PREALLOCATED_PMDS];

	pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(pmds) != 0)
		goto out_free_pgd;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock(&pgd_lock);

	pgd_ctor(mm, pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);

	spin_unlock(&pgd_lock);

	return pgd;

out_free_pmds:
	free_pmds(pmds);
out_free_pgd:
	free_page((unsigned long)pgd);
out:
	return NULL;
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	free_page((unsigned long)pgd);
}
/*
 * Used to set accessed or dirty bits in the page table entries
 * on other architectures. On x86, the accessed and dirty bits
 * are tracked by hardware. However, do_wp_page calls this function
 * to also make the pte writeable at the same time the dirty bit is
 * set. In that case we do actually need to write the PTE.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update_defer(vma->vm_mm, address, ptep);
	}

	return changed;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (changed && dirty) {
		*pmdp = entry;
		pmd_update_defer(vma->vm_mm, address, pmdp);
		/*
		 * We had a write-protection fault here and changed the pmd
		 * to more permissive. No need to flush the TLB for that,
		 * #PF is architecturally guaranteed to do that and in the
		 * worst-case we'll generate a spurious fault.
		 */
	}

	return changed;
}
#endif
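
/*
 * Clear the Accessed bit of a pte and report whether it was set; the
 * result feeds page-reference tracking during reclaim.
 */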
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	if (ret)
		pte_update(vma->vm_mm, addr, ptep);

	return ret;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pmd_t *pmdp)
{
	int ret = 0;

	if (pmd_young(*pmdp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pmdp);

	if (ret)
		pmd_update(vma->vm_mm, addr, pmdp);

	return ret;
}
#endif
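
/*
 * Like ptep_test_and_clear_young(), but also flushes the TLB entry when
 * the Accessed bit was actually cleared.
 */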
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;

	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);

	return young;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	return young;
}
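
/* Set the splitting bit on a huge pmd so concurrent gup-fast walkers see it. */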
void pmdp_splitting_flush(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp)
{
	int set;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	set = !test_and_set_bit(_PAGE_BIT_SPLITTING,
				(unsigned long *)pmdp);
	if (set) {
		pmd_update(vma->vm_mm, address, pmdp);
		/* need tlb flush only to serialize against gup-fast */
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
}
#endif
/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve - size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
	       (int)-reserve);
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
#endif
}
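
/* Validate the fixmap index, then install the pte for that fixed slot. */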
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}
void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
		       pgprot_t flags)
{
	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}