x86, pgtable.h: fix 2-level 32-bit build
arch/x86/include/asm/pgtable.h
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>

#define FIRST_USER_ADDRESS	0

#define _PAGE_BIT_PRESENT	0	/* is present */
#define _PAGE_BIT_RW		1	/* writeable */
#define _PAGE_BIT_USER		2	/* userspace addressable */
#define _PAGE_BIT_PWT		3	/* page write through */
#define _PAGE_BIT_PCD		4	/* page cache disabled */
#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT		7	/* on 4KB pages */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
#define _PAGE_BIT_IOMAP		10	/* flag used to indicate IO mapping */
#define _PAGE_BIT_UNUSED3	11
#define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
#define _PAGE_BIT_SPECIAL	_PAGE_BIT_UNUSED1
#define _PAGE_BIT_CPA_TEST	_PAGE_BIT_UNUSED1
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */

/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present gives true */
#define _PAGE_BIT_PROTNONE	_PAGE_BIT_GLOBAL
/* - set: nonlinear file mapping, saved PTE; unset: swap */
#define _PAGE_BIT_FILE		_PAGE_BIT_DIRTY

#define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER	(_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT	(_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD	(_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_UNUSED1	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
#define _PAGE_IOMAP	(_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
#define _PAGE_UNUSED3	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
#define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE	(_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST	(_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#define __HAVE_ARCH_PTE_SPECIAL

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
#define _PAGE_NX	(_AT(pteval_t, 0))
#endif

#define _PAGE_FILE	(_AT(pteval_t, 1) << _PAGE_BIT_FILE)
#define _PAGE_PROTNONE	(_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
			 _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
			 _PAGE_DIRTY)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)

#define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
#define _PAGE_CACHE_WB		(0)
#define _PAGE_CACHE_WC		(_PAGE_PWT)
#define _PAGE_CACHE_UC_MINUS	(_PAGE_PCD)
#define _PAGE_CACHE_UC		(_PAGE_PCD | _PAGE_PWT)

#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
				 _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW |	\
					 _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)

#define __PAGE_KERNEL_EXEC						\
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
#define __PAGE_KERNEL		(__PAGE_KERNEL_EXEC | _PAGE_NX)

#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_EXEC_NOCACHE	(__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_WC		(__PAGE_KERNEL | _PAGE_CACHE_WC)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_UC_MINUS		(__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE	(__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_NOCACHE	(__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define __PAGE_KERNEL_IO		(__PAGE_KERNEL | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_NOCACHE	(__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_UC_MINUS	(__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_WC		(__PAGE_KERNEL_WC | _PAGE_IOMAP)

#define PAGE_KERNEL			__pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO			__pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC		__pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX			__pgprot(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_WC			__pgprot(__PAGE_KERNEL_WC)
#define PAGE_KERNEL_NOCACHE		__pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_UC_MINUS		__pgprot(__PAGE_KERNEL_UC_MINUS)
#define PAGE_KERNEL_EXEC_NOCACHE	__pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
#define PAGE_KERNEL_LARGE		__pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_NOCACHE	__pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
#define PAGE_KERNEL_LARGE_EXEC		__pgprot(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL		__pgprot(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VSYSCALL_NOCACHE	__pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)

#define PAGE_KERNEL_IO			__pgprot(__PAGE_KERNEL_IO)
#define PAGE_KERNEL_IO_NOCACHE		__pgprot(__PAGE_KERNEL_IO_NOCACHE)
#define PAGE_KERNEL_IO_UC_MINUS		__pgprot(__PAGE_KERNEL_IO_UC_MINUS)
#define PAGE_KERNEL_IO_WC		__pgprot(__PAGE_KERNEL_IO_WC)

/*		xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
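
/*
 * Example (editor's note): the table index is the mmap protection bits
 * in x-w-r order, so a PROT_READ|PROT_EXEC private mapping (xwr = 101)
 * gets __P101 == PAGE_READONLY_EXEC, while the shared variant of the
 * same request gets __S101.
 */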

/*
 * early identity mapping pte attrib macros.
 */
#ifdef CONFIG_X86_64
#define __PAGE_KERNEL_IDENT_LARGE_EXEC	__PAGE_KERNEL_LARGE_EXEC
#else
/*
 * For PDE_IDENT_ATTR include USER bit. As the PDE and PTE protection
 * bits are combined, this will allow the user to access the high address
 * mapped VDSO in the presence of CONFIG_COMPAT_VDSO
 */
#define PTE_IDENT_ATTR	 0x003		/* PRESENT+RW */
#define PDE_IDENT_ATTR	 0x067		/* PRESENT+RW+USER+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR	 0x001		/* PRESENT (no other attributes) */
#endif

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)					\
	((boot_cpu_data.x86 > 3)				\
	 ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))	\
	 : (prot))
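
/*
 * Illustrative usage sketch (editor's note, not part of the original
 * header): a driver typically applies this to a vma's protection before
 * remapping device memory:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				  vma->vm_end - vma->vm_start,
 *				  vma->vm_page_prot);
 */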

#ifndef __ASSEMBLY__

#define pgprot_writecombine	pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
	return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

extern pteval_t __supported_pte_mask;

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}
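
/*
 * Editor's note: mk_pte() further down is built on pfn_pte(); e.g.
 * pfn_pte(page_to_pfn(page), PAGE_KERNEL) yields the same entry as
 * mk_pte(page, PAGE_KERNEL).
 */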

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Keep the pfn and the bits in _PAGE_CHG_MASK, then take
	 * everything else (including the NX portion, if present) from
	 * the newprot, filtered by __supported_pte_mask:
	 */
	val &= _PAGE_CHG_MASK;
	val |= pgprot_val(newprot) & (~_PAGE_CHG_MASK) & __supported_pte_mask;

	return __pte(val);
}
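
/*
 * Illustrative sketch (editor's note): an mprotect()-style protection
 * change preserves the pfn, caching and accessed/dirty state while
 * swapping in the new permission bits:
 *
 *	pte_t old = *ptep;		// ptep is hypothetical here
 *	set_pte(ptep, pte_modify(old, PAGE_READONLY));
 */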

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)

static inline int is_new_memtype_allowed(unsigned long flags,
					 unsigned long new_flags)
{
	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtypes:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 */
	if ((flags == _PAGE_CACHE_UC_MINUS &&
	     new_flags == _PAGE_CACHE_WB) ||
	    (flags == _PAGE_CACHE_WC &&
	     new_flags == _PAGE_CACHE_WB)) {
		return 0;
	}

	return 1;
}
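
/*
 * Illustrative sketch (editor's note): the PAT code would use this when
 * a range is already tracked with a different memtype than the one
 * requested; the names below are hypothetical:
 *
 *	if (!is_new_memtype_allowed(req_type, actual_type))
 *		return -EINVAL;
 */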

#ifndef __ASSEMBLY__
/* Indicate that x86 has its own track and untrack pfn vma functions */
#define __HAVE_PFNMAP_TRACKING

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				 unsigned long size, pgprot_t *vma_prot);
#endif

/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);

#ifdef CONFIG_X86_32
extern void native_pagetable_setup_start(pgd_t *base);
extern void native_pagetable_setup_done(pgd_t *base);
#else
static inline void native_pagetable_setup_start(pgd_t *base) {}
static inline void native_pagetable_setup_done(pgd_t *base) {}
#endif

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_present(mm, addr, ptep, pte)				\
	native_set_pte_present(mm, addr, ptep, pte)
#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)

static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
{
	native_pagetable_setup_start(base);
}

static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
{
	native_pagetable_setup_done(base);
}
#endif	/* CONFIG_PARAVIRT */

#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>

static inline int pte_none(pte_t pte)
{
	return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}
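
/*
 * Editor's note: _PAGE_PROTNONE is included here so that a pte made
 * inaccessible with, e.g., mprotect(addr, len, PROT_NONE) still counts
 * as present (see the _PAGE_BIT_PROTNONE comment near the top).
 */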

static inline int pmd_present(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_PRESENT;
}

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	return (unsigned long)native_pmd_val(pmd) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this function returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}
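
/*
 * Editor's note, worked example: with 4K pages PAGE_SHIFT is 12, so
 * pages_to_mb(4096) == 4096 >> 8 == 16, i.e. 4096 pages == 16 MB.
 */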

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#if PAGETABLE_LEVELS == 2
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif

#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return native_pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_val(pud) >> PAGE_SHIFT)

/* Find an entry in the second-level page table. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline int pud_large(pud_t pud)
{
	return (pud_flags(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#endif	/* PAGETABLE_LEVELS > 2 */

#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	return !native_pgd_val(pgd);
}
#endif	/* PAGETABLE_LEVELS > 3 */

#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
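
/*
 * Illustrative sketch (editor's note): a full four-level lookup of the
 * kernel mapping for a virtual address combines the offset helpers
 * above; level-presence checks are elided for brevity:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * On 2- and 3-level configurations the folded levels are provided by
 * the generic pagetable headers.
 */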

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

enum {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_NUM
};

#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
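
/*
 * Illustrative sketch (editor's note): callers check the reported level
 * to learn the mapping size, e.g.:
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *	if (pte && level == PG_LEVEL_2M)
 *		;	// addr is covered by a 2MB (PSE) mapping
 */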

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces. It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush. The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}
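
/*
 * Illustrative sketch (editor's note): fork-time copy-on-write setup
 * write-protects the parent's pte before the copy goes into the child,
 * roughly:
 *
 *	ptep_set_wrprotect(src_mm, addr, src_pte);	// names hypothetical
 *	pte = *src_pte;					// now read-only in both
 */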

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
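
/*
 * Illustrative sketch (editor's note): pgd setup code uses this to copy
 * the kernel portion of a reference pgd into a freshly allocated one:
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */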

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_PGTABLE_H */