x86: add and use pgd/pud/pmd_flags
arch/x86/include/asm/pgtable.h
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#define FIRST_USER_ADDRESS 0

#define _PAGE_BIT_PRESENT 0 /* is present */
#define _PAGE_BIT_RW 1 /* writeable */
#define _PAGE_BIT_USER 2 /* userspace addressable */
#define _PAGE_BIT_PWT 3 /* page write through */
#define _PAGE_BIT_PCD 4 /* page cache disabled */
#define _PAGE_BIT_ACCESSED 5 /* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY 6 /* was written to (raised by CPU) */
#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT 7 /* on 4KB pages */
#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
#define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
#define _PAGE_BIT_UNUSED3 11
#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */

/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present gives true */
#define _PAGE_BIT_PROTNONE _PAGE_BIT_GLOBAL
/* - set: nonlinear file mapping, saved PTE; unset: swap */
#define _PAGE_BIT_FILE _PAGE_BIT_DIRTY

#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER (_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT (_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD (_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
#define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
#define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#define __HAVE_ARCH_PTE_SPECIAL

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
#define _PAGE_NX (_AT(pteval_t, 0))
#endif

#define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)

#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
                     _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
                       _PAGE_DIRTY)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
                        _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)

#define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT)
#define _PAGE_CACHE_WB (0)
#define _PAGE_CACHE_WC (_PAGE_PWT)
#define _PAGE_CACHE_UC_MINUS (_PAGE_PCD)
#define _PAGE_CACHE_UC (_PAGE_PCD | _PAGE_PWT)

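/*
 * Editor's illustration, not part of the original header: the
 * _PAGE_CACHE_* values are simply the PWT/PCD bit combinations that
 * index the PAT. Assuming the kernel's usual PAT MSR programming,
 * where PAT entry 1 (PWT set, PCD clear) is redefined from WT to
 * write-combining:
 *
 *	pgprot_t prot = __pgprot(__PAGE_KERNEL | _PAGE_CACHE_WC);
 *
 *	switch (pgprot_val(prot) & _PAGE_CACHE_MASK) {
 *	case _PAGE_CACHE_WB:		// 00: write-back (the default)
 *	case _PAGE_CACHE_WC:		// 01: write-combining (via PAT)
 *	case _PAGE_CACHE_UC_MINUS:	// 10: uncached, MTRR may override
 *	case _PAGE_CACHE_UC:		// 11: strongly uncached
 *		break;
 *	}
 *
 * Without PAT support, code wanting WC has to fall back to UC-.
 */
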
72 #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
73 #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
74 _PAGE_ACCESSED | _PAGE_NX)
75
76 #define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | \
77 _PAGE_USER | _PAGE_ACCESSED)
78 #define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
79 _PAGE_ACCESSED | _PAGE_NX)
80 #define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
81 _PAGE_ACCESSED)
82 #define PAGE_COPY PAGE_COPY_NOEXEC
83 #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | \
84 _PAGE_ACCESSED | _PAGE_NX)
85 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
86 _PAGE_ACCESSED)
87
88 #define __PAGE_KERNEL_EXEC \
89 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
90 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
91
92 #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
93 #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
94 #define __PAGE_KERNEL_EXEC_NOCACHE (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
95 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
96 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
97 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
98 #define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
99 #define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
100 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
101 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
102 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
103
104 #define __PAGE_KERNEL_IO (__PAGE_KERNEL | _PAGE_IOMAP)
105 #define __PAGE_KERNEL_IO_NOCACHE (__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)
106 #define __PAGE_KERNEL_IO_UC_MINUS (__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP)
107 #define __PAGE_KERNEL_IO_WC (__PAGE_KERNEL_WC | _PAGE_IOMAP)
108
109 #define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
110 #define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
111 #define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
112 #define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX)
113 #define PAGE_KERNEL_WC __pgprot(__PAGE_KERNEL_WC)
114 #define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
115 #define PAGE_KERNEL_UC_MINUS __pgprot(__PAGE_KERNEL_UC_MINUS)
116 #define PAGE_KERNEL_EXEC_NOCACHE __pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
117 #define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
118 #define PAGE_KERNEL_LARGE_NOCACHE __pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
119 #define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
120 #define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL)
121 #define PAGE_KERNEL_VSYSCALL_NOCACHE __pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)
122
123 #define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO)
124 #define PAGE_KERNEL_IO_NOCACHE __pgprot(__PAGE_KERNEL_IO_NOCACHE)
125 #define PAGE_KERNEL_IO_UC_MINUS __pgprot(__PAGE_KERNEL_IO_UC_MINUS)
126 #define PAGE_KERNEL_IO_WC __pgprot(__PAGE_KERNEL_IO_WC)
127
128 /* xwr */
129 #define __P000 PAGE_NONE
130 #define __P001 PAGE_READONLY
131 #define __P010 PAGE_COPY
132 #define __P011 PAGE_COPY
133 #define __P100 PAGE_READONLY_EXEC
134 #define __P101 PAGE_READONLY_EXEC
135 #define __P110 PAGE_COPY_EXEC
136 #define __P111 PAGE_COPY_EXEC
137
138 #define __S000 PAGE_NONE
139 #define __S001 PAGE_READONLY
140 #define __S010 PAGE_SHARED
141 #define __S011 PAGE_SHARED
142 #define __S100 PAGE_READONLY_EXEC
143 #define __S101 PAGE_READONLY_EXEC
144 #define __S110 PAGE_SHARED_EXEC
145 #define __S111 PAGE_SHARED_EXEC
146
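/*
 * Editor's illustration, not part of the original header: the __P/__S
 * tables are the "xwr" protection map; __P* entries are for private
 * (copy-on-write) mappings, __S* for shared ones, and generic mm code
 * indexes them through protection_map[] in <linux/mm.h>:
 *
 *	pgprot_t prot = protection_map[VM_READ | VM_WRITE]; // == __P011
 *
 * A private rw- mapping thus resolves to PAGE_COPY: the hardware PTE is
 * read-only, and the first write fault drives copy-on-write.
 */
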
/*
 * early identity mapping pte attrib macros.
 */
#ifdef CONFIG_X86_64
#define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
#else
/*
 * For PDE_IDENT_ATTR, include the USER bit. As the PDE and PTE protection
 * bits are combined, this allows the user to access the high address mapped
 * VDSO in the presence of CONFIG_COMPAT_VDSO.
 */
#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
#endif

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot) \
        ((boot_cpu_data.x86 > 3) \
         ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS)) \
         : (prot))

#ifndef __ASSEMBLY__

#define pgprot_writecombine pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
        return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
        return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
        return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
        return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
        return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
        return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
        return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
        return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
        return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte) pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
        return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
                (_PAGE_PSE | _PAGE_PRESENT);
}

static inline pte_t pte_mkclean(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

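/*
 * Editor's sketch, not part of the original header: the pte_mk*() and
 * pte_clr*()/pte_wrprotect() helpers are pure value transformations on a
 * pte_t, so they compose freely before the entry is installed. For
 * example, turning a read-only entry into a dirty, writable one
 * (illustrative only; 'pfn' is assumed):
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL_RO);
 *	pte = pte_mkwrite(pte_mkdirty(pte));
 *	// pte_write(pte) and pte_dirty(pte) are now both true
 */
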
extern pteval_t __supported_pte_mask;

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
        return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
                      pgprot_val(pgprot)) & __supported_pte_mask);
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
        return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) |
                      pgprot_val(pgprot)) & __supported_pte_mask);
}

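/*
 * Editor's sketch (hypothetical values): pfn_pte() combines a page frame
 * number with protection bits, then masks against __supported_pte_mask so
 * that bits the CPU cannot handle (e.g. NX without PAE on 32-bit) are
 * silently dropped:
 *
 *	unsigned long pfn = page_to_pfn(page);
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *	// pte_pfn(pte) == pfn; _PAGE_NX survives only if
 *	// __supported_pte_mask allows it
 */
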
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pteval_t val = pte_val(pte);

        /*
         * Chop off the NX bit (if present), and add the NX portion of
         * the newprot (if present):
         */
        val &= _PAGE_CHG_MASK;
        val |= pgprot_val(newprot) & (~_PAGE_CHG_MASK) & __supported_pte_mask;

        return __pte(val);
}

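/*
 * Editor's sketch, not part of the original header: pte_modify() keeps
 * the PFN plus the _PAGE_CHG_MASK bits (caching, special, accessed and
 * dirty) and takes every other bit from the new protection. An
 * mprotect()-style downgrade (illustrative only; 'pfn' is assumed):
 *
 *	pte_t pte = pte_mkdirty(pfn_pte(pfn, PAGE_SHARED));
 *	pte = pte_modify(pte, PAGE_READONLY);
 *	// RW is gone, but the PFN and the dirty bit survive
 */
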
/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
        pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
        pgprotval_t addbits = pgprot_val(newprot);
        return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)

static inline int is_new_memtype_allowed(unsigned long flags,
                                         unsigned long new_flags)
{
        /*
         * Certain new memtypes are not allowed with certain
         * requested memtype:
         * - request is uncached, return cannot be write-back
         * - request is write-combine, return cannot be write-back
         */
        if ((flags == _PAGE_CACHE_UC_MINUS &&
             new_flags == _PAGE_CACHE_WB) ||
            (flags == _PAGE_CACHE_WC &&
             new_flags == _PAGE_CACHE_WB)) {
                return 0;
        }

        return 1;
}

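/*
 * Editor's sketch (hypothetical calls): a caller that asked for UC- or WC
 * would have its ordering assumptions silently weakened if it were handed
 * back a write-back mapping, so exactly those combinations are rejected:
 *
 *	is_new_memtype_allowed(_PAGE_CACHE_WC, _PAGE_CACHE_WB);       // 0
 *	is_new_memtype_allowed(_PAGE_CACHE_UC_MINUS, _PAGE_CACHE_UC); // 1
 */
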
#ifndef __ASSEMBLY__
/* Indicate that x86 has its own track and untrack pfn vma functions */
#define __HAVE_PFNMAP_TRACKING

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                                 unsigned long size, pgprot_t *vma_prot);
#endif

/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);

#ifdef CONFIG_X86_32
extern void native_pagetable_setup_start(pgd_t *base);
extern void native_pagetable_setup_done(pgd_t *base);
#else
static inline void native_pagetable_setup_start(pgd_t *base) {}
static inline void native_pagetable_setup_done(pgd_t *base) {}
#endif

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte) native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_present(mm, addr, ptep, pte) \
        native_set_pte_present(mm, addr, ptep, pte)
#define set_pte_atomic(ptep, pte) \
        native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd) native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud) native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud) native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd) native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep) do { } while (0)
#define pte_update_defer(mm, addr, ptep) do { } while (0)

static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
{
        native_pagetable_setup_start(base);
}

static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
{
        native_pagetable_setup_done(base);
}
#endif /* CONFIG_PARAVIRT */

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>

static inline int pte_none(pte_t pte)
{
        return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
        return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
        return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

static inline int pmd_present(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_PRESENT;
}

static inline int pmd_none(pmd_t pmd)
{
        /* Only check low word on 32-bit platforms, since it might be
           out of sync with upper half. */
        return !(unsigned long)native_pmd_val(pmd);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
        return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}

static inline struct page *pmd_page(pmd_t pmd)
{
        return pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT);
}

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this function returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned pmd_index(unsigned long address)
{
        return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned pte_index(unsigned long address)
{
        return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
        return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
        return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
        return npg >> (20 - PAGE_SHIFT);
}

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
        remap_pfn_range(vma, vaddr, pfn, size, prot)

#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
        return pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
        return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
        return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
}

static inline struct page *pud_page(pud_t pud)
{
        return pfn_to_page(pud_val(pud) >> PAGE_SHIFT);
}

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
        return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline int pud_large(pud_t pud)
{
        return (pud_flags(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
                (_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
        return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#endif /* PAGETABLE_LEVELS > 2 */

#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
        return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
        return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

static inline struct page *pgd_page(pgd_t pgd)
{
        return pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT);
}

/* to find an entry in a page-table-directory. */
static inline unsigned pud_index(unsigned long address)
{
        return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
        return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
        return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
        return !pgd_val(pgd);
}
#endif /* PAGETABLE_LEVELS > 3 */

#endif /* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

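/*
 * Editor's sketch of a full software page-table walk with the helpers
 * above (illustrative only; no locking shown, and a present, non-huge
 * mapping is assumed; folded levels go through the generic pass-through
 * helpers):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * Each *_offset() step is just "virtual base of the next-level page"
 * plus the matching *_index(addr).
 */
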

#define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

enum {
        PG_LEVEL_NONE,
        PG_LEVEL_4K,
        PG_LEVEL_2M,
        PG_LEVEL_1G,
        PG_LEVEL_NUM
};

#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);

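/*
 * Editor's sketch (hypothetical use): checking how a kernel address is
 * currently mapped:
 *
 *	unsigned int level;
 *	pte_t *kpte = lookup_address((unsigned long)addr, &level);
 *
 *	if (kpte && level == PG_LEVEL_2M)
 *		; // addr lives in a 2MB PSE mapping; *kpte is really
 *		  // the pmd entry reinterpreted as a pte
 */
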
/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
        pte_t res = *ptep;

        /* Pure native function needs no input for mm, addr */
        native_pte_clear(NULL, 0, ptep);
        return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
                                     pte_t *ptep, pte_t pte)
{
        native_set_pte(ptep, pte);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces. It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush. The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep) do { } while (0)
#define pte_update_defer(mm, addr, ptep) do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pte_t *ptep,
                                 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep)
{
        pte_t pte = native_ptep_get_and_clear(ptep);
        pte_update(mm, addr, ptep);
        return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
                                            unsigned long addr, pte_t *ptep,
                                            int full)
{
        pte_t pte;
        if (full) {
                /*
                 * Full address destruction in progress; paravirt does not
                 * care about updates and native needs no locking
                 */
                pte = native_local_ptep_get_and_clear(ptep);
        } else {
                pte = ptep_get_and_clear(mm, addr, ptep);
        }
        return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
                                      unsigned long addr, pte_t *ptep)
{
        clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
        pte_update(mm, addr, ptep);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
        memcpy(dst, src, count * sizeof(pgd_t));
}

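/*
 * Editor's sketch, not part of the original header: this is roughly how
 * the kernel portion of the address space is shared into a freshly
 * allocated pgd page (illustrative only; real callers also take pgd_lock
 * and go through the paravirt/pgd_list machinery):
 *
 *	clone_pgd_range(new_pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */
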

#include <asm-generic/pgtable.h>
#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */