#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#define FIRST_USER_ADDRESS	0

#define _PAGE_BIT_PRESENT	0	/* is present */
#define _PAGE_BIT_RW		1	/* writeable */
#define _PAGE_BIT_USER		2	/* userspace addressable */
#define _PAGE_BIT_PWT		3	/* page write through */
#define _PAGE_BIT_PCD		4	/* page cache disabled */
#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
#define _PAGE_BIT_FILE		6
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2 MB) page */
#define _PAGE_BIT_PAT		7	/* on 4 KB pages */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
#define _PAGE_BIT_UNUSED2	10
#define _PAGE_BIT_UNUSED3	11
#define _PAGE_BIT_PAT_LARGE	12	/* on 2 MB or 1 GB pages */
#define _PAGE_BIT_SPECIAL	_PAGE_BIT_UNUSED1
#define _PAGE_BIT_CPA_TEST	_PAGE_BIT_UNUSED1
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */

#define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER	(_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT	(_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD	(_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_UNUSED1	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
#define _PAGE_UNUSED2	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED2)
#define _PAGE_UNUSED3	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
#define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE	(_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST	(_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#define __HAVE_ARCH_PTE_SPECIAL

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
#define _PAGE_NX	(_AT(pteval_t, 0))
#endif
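/*
 * Note: on 32-bit without PAE a PTE is only 32 bits wide, so there is
 * no bit 63 to set.  Defining _PAGE_NX as 0 there lets callers OR it
 * into protection values unconditionally; it simply has no effect.
 */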

/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE	_PAGE_DIRTY	/* nonlinear file mapping,
					 * saved PTE; unset: swap */
#define _PAGE_PROTNONE	_PAGE_PSE	/* if the user mapped it with
					 * PROT_NONE; pte_present gives true */

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
			 _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
			 _PAGE_DIRTY)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)

#define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
#define _PAGE_CACHE_WB		(0)
#define _PAGE_CACHE_WC		(_PAGE_PWT)
#define _PAGE_CACHE_UC_MINUS	(_PAGE_PCD)
#define _PAGE_CACHE_UC		(_PAGE_PCD | _PAGE_PWT)

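/*
 * The PWT/PCD pair (together with the PAT bit, which is 0 in all of the
 * _PAGE_CACHE_* values above) selects an entry in the PAT MSR.  With
 * the PAT layout the kernel programs at boot, entry 1 (PWT set) is
 * write-combining and entry 2 (PCD set) is uncached-minus, which is
 * why _PAGE_CACHE_WC and _PAGE_CACHE_UC_MINUS are encoded this way.
 */
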
#define PAGE_NONE		__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED		__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
					 _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW |	\
					 _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)

#define __PAGE_KERNEL_EXEC						\
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
#define __PAGE_KERNEL		(__PAGE_KERNEL_EXEC | _PAGE_NX)

#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_EXEC_NOCACHE	(__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_WC		(__PAGE_KERNEL | _PAGE_CACHE_WC)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_UC_MINUS		(__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE	(__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_NOCACHE	(__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define PAGE_KERNEL			__pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO			__pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC		__pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX			__pgprot(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_WC			__pgprot(__PAGE_KERNEL_WC)
#define PAGE_KERNEL_NOCACHE		__pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_UC_MINUS		__pgprot(__PAGE_KERNEL_UC_MINUS)
#define PAGE_KERNEL_EXEC_NOCACHE	__pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
#define PAGE_KERNEL_LARGE		__pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_NOCACHE	__pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
#define PAGE_KERNEL_LARGE_EXEC		__pgprot(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL		__pgprot(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VSYSCALL_NOCACHE	__pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)

/* xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC

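/*
 * The __P* (private) and __S* (shared) tables are indexed by the
 * PROT_EXEC/PROT_WRITE/PROT_READ bits (the "xwr" above) of a mapping.
 * For example, a private PROT_READ|PROT_WRITE mapping uses __P011,
 * i.e. PAGE_COPY: it is mapped read-only so that the first write
 * faults and copy-on-write can be performed.
 */
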
#ifndef __ASSEMBLY__

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
	return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

static inline int pmd_large(pmd_t pte)
{
	return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

extern pteval_t __supported_pte_mask;

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

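/*
 * Illustrative use (not part of this header): building a PTE that maps
 * physical frame 'pfn' with ordinary kernel permissions:
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *
 * Flags the CPU does not support (e.g. NX without the NX cpuid feature)
 * are masked off via __supported_pte_mask.
 */
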
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Keep the PFN, cache attributes and accessed/dirty state
	 * (_PAGE_CHG_MASK); take all other bits, including NX, from
	 * the new protection:
	 */
	val &= _PAGE_CHG_MASK;
	val |= pgprot_val(newprot) & (~_PAGE_CHG_MASK) & __supported_pte_mask;

	return __pte(val);
}

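/*
 * Illustrative use (not part of this header): mprotect-style code
 * changes the protection of an existing PTE while keeping its PFN,
 * cache attributes and accessed/dirty state:
 *
 *	pte = pte_modify(pte, vma->vm_page_prot);
 */
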
/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)

#ifndef __ASSEMBLY__
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				 unsigned long size, pgprot_t *vma_prot);
#endif

/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);

#ifdef CONFIG_X86_32
extern void native_pagetable_setup_start(pgd_t *base);
extern void native_pagetable_setup_done(pgd_t *base);
#else
static inline void native_pagetable_setup_start(pgd_t *base) {}
static inline void native_pagetable_setup_done(pgd_t *base) {}
#endif

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_present(mm, addr, ptep, pte)				\
	native_set_pte_present(mm, addr, ptep, pte)
#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)

static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
{
	native_pagetable_setup_start(base);
}

static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
{
	native_pagetable_setup_done(base);
}
#endif	/* CONFIG_PARAVIRT */

#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif

/*
 * The pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

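/*
 * Worked example: with 64-bit four-level paging, PGDIR_SHIFT is 39 and
 * PTRS_PER_PGD is 512, so pgd_index() extracts bits 39..47 of the
 * virtual address:
 *
 *	pgd_t *pgd = pgd_offset(mm, 0x00007f0000000000UL);
 *	 == mm->pgd + ((0x00007f0000000000UL >> 39) & 511)
 *	 == mm->pgd + 254
 */
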
#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

enum {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_NUM
};

#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'.  NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);

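/*
 * Illustrative use (not part of this header): find the entry mapping a
 * kernel virtual address and check whether it comes from a large page:
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *	if (pte && level == PG_LEVEL_2M)
 *		...	(pte actually points at a PSE pmd entry)
 */
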
/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.  The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
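
/*
 * Illustrative use (not part of this header): pgd setup code copies the
 * kernel's portion of a reference pgd into a freshly allocated one:
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */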

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_PGTABLE_H */