cfq-iosched: fix rcu freeing of cfq io contexts
[deliverable/linux.git] / include / asm-x86 / pgalloc_32.h
CommitLineData
1da177e4
LT
1#ifndef _I386_PGALLOC_H
2#define _I386_PGALLOC_H
3
1da177e4
LT
4#include <linux/threads.h>
5#include <linux/mm.h> /* for struct page */
5aa05085 6#include <linux/pagemap.h>
a5a19c63
JF
7#include <asm/tlb.h>
8#include <asm-generic/tlb.h>
1da177e4 9
c119ecce
ZA
/*
 * Page-table allocation/release hooks.  With CONFIG_PARAVIRT the real
 * implementations come from <asm/paravirt.h> (so a hypervisor backend
 * can track page-table pages); otherwise they compile away to nothing.
 */
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define paravirt_alloc_pt(mm, pfn) do { } while (0)
#define paravirt_alloc_pd(mm, pfn) do { } while (0)
#define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0)
#define paravirt_release_pt(pfn) do { } while (0)
#define paravirt_release_pd(pfn) do { } while (0)
#endif
19
a5a19c63
JF
/*
 * Install a kernel pte page into a pmd entry: notify the paravirt
 * layer of the new page-table page (no-op without CONFIG_PARAVIRT),
 * then write the pmd to point at it with the _PAGE_TABLE bits set.
 * The hook runs before set_pmd() so the backend sees the page before
 * the entry becomes visible.
 */
static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT);
	set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}
1da177e4 26
a5a19c63
JF
/*
 * Install a user pte page (given as a struct page) into a pmd entry.
 * As in pmd_populate_kernel(), the paravirt hook is invoked before the
 * pmd write makes the page table visible.
 */
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
{
	unsigned long pfn = page_to_pfn(pte);

	paravirt_alloc_pt(mm, pfn);
	/* (pteval_t) cast: keep the shift from truncating the physical
	   address when pteval_t is wider than unsigned long (PAE). */
	set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
}
/* The page-table page a pmd entry points at, as a struct page. */
#define pmd_pgtable(pmd) pmd_page(pmd)
c119ecce 35
1da177e4
LT
/*
 * Allocate and free page tables.
 * Out-of-line implementations live in arch mm code; pgd_free() must be
 * given a pgd obtained from pgd_alloc().
 */
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

/* Kernel pte pages are returned as a raw pte_t *, user pte pages as a
   pgtable_t (struct page) — see the matching pte_free_kernel()/pte_free(). */
extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
1da177e4 44
/* Free a kernel pte page obtained from pte_alloc_one_kernel(). */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}
49
/*
 * Free a user pte page obtained from pte_alloc_one().  The dtor must
 * run before the page is released, to tear down the per-page state set
 * up by the matching pgtable_page_ctor() at allocation time.
 */
static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}
55
56
/* Free a pte page as part of mmu_gather-batched TLB teardown. */
extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
1da177e4
LT
58
59#ifdef CONFIG_X86_PAE
60/*
61 * In the PAE case we free the pmds as part of the pgd.
62 */
a5a19c63
JF
/*
 * Allocate one zeroed, page-sized pmd.  __GFP_REPEAT asks the
 * allocator to retry harder before giving up; returns NULL on failure.
 */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}
67
/* Free a pmd page obtained from pmd_alloc_one(). */
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	/* pmds are page-sized and page-aligned; anything else means the
	   pointer is corrupt, so die loudly rather than free it. */
	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	free_page((unsigned long)pmd);
}
73
/* Free a pmd page as part of mmu_gather-batched TLB teardown. */
extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
a5a19c63 75
/*
 * PAE only: point a top-level (PDPT) entry at a pmd page.  The
 * paravirt hook fires before the entry is written; the entry carries
 * only _PAGE_PRESENT, and a cr3 reload flushes the cached PDPT when
 * the currently-active mm is being modified.
 */
static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	if (mm == current->active_mm)
		write_cr3(read_cr3());
}
93#endif /* CONFIG_X86_PAE */
1da177e4 94
1da177e4 95#endif /* _I386_PGALLOC_H */
This page took 0.369934 seconds and 5 git commands to generate.