#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#define FIRST_USER_ADDRESS 0

#define _PAGE_BIT_PRESENT 0 /* is present */
#define _PAGE_BIT_RW 1 /* writeable */
#define _PAGE_BIT_USER 2 /* userspace addressable */
#define _PAGE_BIT_PWT 3 /* page write through */
#define _PAGE_BIT_PCD 4 /* page cache disabled */
#define _PAGE_BIT_ACCESSED 5 /* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY 6 /* was written to (raised by CPU) */
#define _PAGE_BIT_FILE 6
#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT 7 /* on 4KB pages */
#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
#define _PAGE_BIT_UNUSED2 10
#define _PAGE_BIT_UNUSED3 11
#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
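/*
 * Note that bits 6 and 7 are deliberately overloaded: _PAGE_BIT_FILE
 * reuses the dirty bit's position but is only meaningful when
 * _PAGE_PRESENT is clear (see _PAGE_FILE below), and _PAGE_BIT_PAT
 * reuses the PSE bit's position but only in 4KB leaf entries, where
 * PSE cannot occur.
 */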

#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER (_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT (_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD (_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
#define _PAGE_UNUSED2 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED2)
#define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define __HAVE_ARCH_PTE_SPECIAL

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
#define _PAGE_NX (_AT(pteval_t, 0))
#endif

/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE _PAGE_DIRTY /* nonlinear file mapping,
				* saved PTE; unset:swap */
#define _PAGE_PROTNONE _PAGE_PSE /* if the user mapped it with PROT_NONE;
				    pte_present gives true */

#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
		     _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
		       _PAGE_DIRTY)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
			_PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)

#define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT)
#define _PAGE_CACHE_WB (0)
#define _PAGE_CACHE_WC (_PAGE_PWT)
#define _PAGE_CACHE_UC_MINUS (_PAGE_PCD)
#define _PAGE_CACHE_UC (_PAGE_PCD | _PAGE_PWT)
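/*
 * The PCD/PWT pair (together with the PAT bit, which these macros leave
 * clear) indexes into the PAT MSR to pick a memory type. With the
 * kernel's PAT programming, the four combinations above give write-back,
 * write-combining, uncached-minus and strongly uncached mappings
 * respectively; note that _PAGE_CACHE_WC relies on the PAT slot selected
 * by PWT having been reprogrammed away from its power-on default of
 * write-through.
 */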

#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
			     _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | \
				  _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
				  _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
				_PAGE_ACCESSED)
#define PAGE_COPY PAGE_COPY_NOEXEC
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | \
			       _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
				    _PAGE_ACCESSED)

#define __PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
#define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)

#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_EXEC_NOCACHE (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_WC __pgprot(__PAGE_KERNEL_WC)
#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_UC_MINUS __pgprot(__PAGE_KERNEL_UC_MINUS)
#define PAGE_KERNEL_EXEC_NOCACHE __pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_NOCACHE __pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VSYSCALL_NOCACHE __pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)

/* xwr */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY_EXEC
#define __P101 PAGE_READONLY_EXEC
#define __P110 PAGE_COPY_EXEC
#define __P111 PAGE_COPY_EXEC

#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY_EXEC
#define __S101 PAGE_READONLY_EXEC
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
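/*
 * The __P tables are used for private (copy-on-write) mappings and the
 * __S tables for shared ones; the three index bits are PROT_EXEC,
 * PROT_WRITE and PROT_READ ("xwr"). For example, mmap(PROT_READ |
 * PROT_WRITE, MAP_PRIVATE) resolves to __P011 == PAGE_COPY: the page is
 * mapped read-only so the first write faults and triggers COW, whereas
 * the MAP_SHARED variant, __S011 == PAGE_SHARED, is writable outright.
 */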

#ifndef __ASSEMBLY__

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
	return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

static inline int pmd_large(pmd_t pte)
{
	return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

extern pteval_t __supported_pte_mask;

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}
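/*
 * Illustrative sketch (identifiers "pfn" and "vaddr" are hypothetical):
 * to map a physical frame at a kernel virtual address with ordinary
 * cached kernel permissions, one would build the pte with pfn_pte() and
 * install it with set_pte_vaddr(), declared further down in this file:
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *	set_pte_vaddr(vaddr, pte);
 */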

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= pgprot_val(newprot) & (~_PAGE_CHG_MASK) & __supported_pte_mask;

	return __pte(val);
}
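/*
 * pte_modify() is what mprotect() ultimately uses: the generic code
 * calls it roughly as pte_modify(old_pte, vma->vm_page_prot) to swap in
 * the new protection bits, while _PAGE_CHG_MASK keeps the pfn, caching
 * and dirty/accessed state of the old entry intact.
 */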

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)

#ifndef __ASSEMBLY__
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				 unsigned long size, pgprot_t *vma_prot);
#endif

/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);

#ifdef CONFIG_X86_32
extern void native_pagetable_setup_start(pgd_t *base);
extern void native_pagetable_setup_done(pgd_t *base);
#else
static inline void native_pagetable_setup_start(pgd_t *base) {}
static inline void native_pagetable_setup_done(pgd_t *base) {}
#endif

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte) native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_present(mm, addr, ptep, pte) \
	native_set_pte_present(mm, addr, ptep, pte)
#define set_pte_atomic(ptep, pte) \
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd) native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud) native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud) native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd) native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep) do { } while (0)
#define pte_update_defer(mm, addr, ptep) do { } while (0)

static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
{
	native_pagetable_setup_start(base);
}

static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
{
	native_pagetable_setup_done(base);
}
#endif /* CONFIG_PARAVIRT */

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
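/*
 * Worked example (values assume x86_64 with its 4-level layout, where
 * PGDIR_SHIFT is 39 and PTRS_PER_PGD is 512): for the start of the
 * direct mapping, 0xffff880000000000, pgd_index() yields
 * (0xffff880000000000 >> 39) & 511 == 272, so
 * pgd_offset_k(0xffff880000000000) is init_mm.pgd + 272.
 */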


#define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

enum {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_NUM
};

#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
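/*
 * Example (illustrative only; "addr" is hypothetical): checking whether
 * a kernel address is backed by a large page:
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *
 *	if (pte && level == PG_LEVEL_2M)
 *		... the mapping is a 2MB pmd entry ...
 */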

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces. It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush. The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep) do { } while (0)
#define pte_update_defer(mm, addr, ptep) do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}
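/*
 * ptep_set_wrprotect() is what fork() uses, via the generic page table
 * copying path, to write-protect pages shared copy-on-write. It clears
 * _PAGE_RW with an atomic clear_bit so that a concurrent hardware
 * dirty/accessed update on another CPU cannot be lost.
 */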

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
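/*
 * Typical use (a sketch; "pgd" names a freshly allocated pgd page): when
 * a new pgd is created, the kernel's own entries are copied into it with
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 *
 * so every address space shares the kernel half of the page tables.
 */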


#include <asm-generic/pgtable.h>
#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */