x86: remove the Xen-specific _PAGE_IOMAP PTE flag
arch/x86/include/asm/pgtable_types.h
#ifndef _ASM_X86_PGTABLE_DEFS_H
#define _ASM_X86_PGTABLE_DEFS_H

#include <linux/const.h>
#include <asm/page_types.h>

#define FIRST_USER_ADDRESS      0

#define _PAGE_BIT_PRESENT       0       /* is present */
#define _PAGE_BIT_RW            1       /* writeable */
#define _PAGE_BIT_USER          2       /* userspace addressable */
#define _PAGE_BIT_PWT           3       /* page write through */
#define _PAGE_BIT_PCD           4       /* page cache disabled */
#define _PAGE_BIT_ACCESSED      5       /* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY         6       /* was written to (raised by CPU) */
#define _PAGE_BIT_PSE           7       /* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT           7       /* on 4KB pages */
#define _PAGE_BIT_GLOBAL        8       /* Global TLB entry PPro+ */
#define _PAGE_BIT_SOFTW1        9       /* available for programmer */
#define _PAGE_BIT_SOFTW2        10      /* " */
#define _PAGE_BIT_SOFTW3        11      /* " */
#define _PAGE_BIT_PAT_LARGE     12      /* On 2MB or 1GB pages */
#define _PAGE_BIT_SPECIAL       _PAGE_BIT_SOFTW1
#define _PAGE_BIT_CPA_TEST      _PAGE_BIT_SOFTW1
#define _PAGE_BIT_SPLITTING     _PAGE_BIT_SOFTW2 /* only valid on a PSE pmd */
#define _PAGE_BIT_HIDDEN        _PAGE_BIT_SOFTW3 /* hidden by kmemcheck */
#define _PAGE_BIT_SOFT_DIRTY    _PAGE_BIT_SOFTW3 /* software dirty tracking */
#define _PAGE_BIT_NX            63      /* No execute: only valid after cpuid check */

/*
 * Swap offsets on configurations that allow automatic NUMA balancing use the
 * bits after _PAGE_BIT_GLOBAL. To uniquely distinguish NUMA hinting PTEs from
 * swap entries, we use the first bit after _PAGE_BIT_GLOBAL and shrink the
 * maximum possible swap space from 16TB to 8TB.
 */
#define _PAGE_BIT_NUMA          (_PAGE_BIT_GLOBAL+1)

/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present gives true */
#define _PAGE_BIT_PROTNONE      _PAGE_BIT_GLOBAL
/* - set: nonlinear file mapping, saved PTE; unset: swap */
#define _PAGE_BIT_FILE          _PAGE_BIT_DIRTY

#define _PAGE_PRESENT   (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW        (_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER      (_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT       (_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD       (_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED  (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY     (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE       (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL    (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_SOFTW1    (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW1)
#define _PAGE_SOFTW2    (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW2)
#define _PAGE_PAT       (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL   (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST  (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#define _PAGE_SPLITTING (_AT(pteval_t, 1) << _PAGE_BIT_SPLITTING)
#define __HAVE_ARCH_PTE_SPECIAL

#ifdef CONFIG_KMEMCHECK
#define _PAGE_HIDDEN    (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
#else
#define _PAGE_HIDDEN    (_AT(pteval_t, 0))
#endif

/*
 * The same hidden bit is used by kmemcheck, but since kmemcheck
 * works on kernel pages while the soft-dirty engine works on
 * user-space pages, the two users do not conflict with each other.
 */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY        (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
#else
#define _PAGE_SOFT_DIRTY        (_AT(pteval_t, 0))
#endif

/*
 * _PAGE_NUMA distinguishes between a numa hinting minor fault and a page
 * that is not present. The hinting fault gathers numa placement statistics
 * (see pte_numa()). The bit is always zero when the PTE is not present.
 *
 * The bit chosen must be one that is zero both when the pmd is present
 * and when it is not present, so that we don't lose information when we
 * set it while atomically clearing the present bit.
 */
#ifdef CONFIG_NUMA_BALANCING
#define _PAGE_NUMA      (_AT(pteval_t, 1) << _PAGE_BIT_NUMA)
#else
#define _PAGE_NUMA      (_AT(pteval_t, 0))
#endif

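/*
 * A minimal sketch of how this bit is consumed (mirroring pte_numa(),
 * see asm/pgtable.h): a NUMA hinting entry is one with _PAGE_NUMA set
 * and _PAGE_PRESENT clear, so the test is, roughly:
 *
 *      (pte_flags(pte) & (_PAGE_NUMA | _PAGE_PRESENT)) == _PAGE_NUMA
 */
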
/*
 * Tracking the soft-dirty bit when a page goes to swap is tricky.
 * We need a bit which can be stored in the pte _and_ does not conflict
 * with the swap entry format. On x86 bits 6 and 7 are *not* involved
 * in swap entry computation, but bit 6 is used for nonlinear file
 * mappings, so we borrow bit 7 for soft dirty tracking.
 *
 * Please note that this bit must be treated as the swap soft-dirty
 * mark if and only if the PTE has the present bit clear!
 */
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY    _PAGE_PSE
#else
#define _PAGE_SWP_SOFT_DIRTY    (_AT(pteval_t, 0))
#endif

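/*
 * A minimal usage sketch (cf. pte_swp_mksoft_dirty() and
 * pte_swp_soft_dirty() in asm/pgtable.h), valid only under the rule
 * above that the PTE is non-present:
 *
 *      pte = pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);   mark swap pte
 *      if (pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY)        test swap pte
 *              ...
 */
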
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX        (_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
#define _PAGE_NX        (_AT(pteval_t, 0))
#endif

#define _PAGE_FILE      (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
#define _PAGE_PROTNONE  (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)

#define _PAGE_TABLE     (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |        \
                         _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE   (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |    \
                         _PAGE_DIRTY)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK  (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |         \
                         _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
                         _PAGE_SOFT_DIRTY | _PAGE_NUMA)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_NUMA)

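/*
 * A rough sketch of how pte_modify() (asm/pgtable.h) applies this mask:
 * the bits in _PAGE_CHG_MASK survive, everything else is taken from the
 * new protection:
 *
 *      val &= _PAGE_CHG_MASK;
 *      val |= pgprot_val(newprot) & (pteval_t)~_PAGE_CHG_MASK;
 */
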
#define _PAGE_CACHE_MASK        (_PAGE_PCD | _PAGE_PWT)
#define _PAGE_CACHE_WB          (0)
#define _PAGE_CACHE_WC          (_PAGE_PWT)
#define _PAGE_CACHE_UC_MINUS    (_PAGE_PCD)
#define _PAGE_CACHE_UC          (_PAGE_PCD | _PAGE_PWT)

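/*
 * Illustrative note: these PWT/PCD combinations index into the PAT, so
 * the effective memory type also depends on how the PAT MSR has been
 * programmed. For example, __PAGE_KERNEL_WC below is simply
 * __PAGE_KERNEL | _PAGE_CACHE_WC, i.e. a write-combining kernel
 * mapping under the kernel's PAT setup.
 */
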
#define PAGE_NONE               __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED             __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
                                         _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC        __pgprot(_PAGE_PRESENT | _PAGE_RW |     \
                                         _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC        __pgprot(_PAGE_PRESENT | _PAGE_USER |   \
                                         _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC          __pgprot(_PAGE_PRESENT | _PAGE_USER |   \
                                         _PAGE_ACCESSED)
#define PAGE_COPY               PAGE_COPY_NOEXEC
#define PAGE_READONLY           __pgprot(_PAGE_PRESENT | _PAGE_USER |   \
                                         _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC      __pgprot(_PAGE_PRESENT | _PAGE_USER |   \
                                         _PAGE_ACCESSED)

#define __PAGE_KERNEL_EXEC \
        (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
#define __PAGE_KERNEL           (__PAGE_KERNEL_EXEC | _PAGE_NX)

#define __PAGE_KERNEL_RO                (__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX                (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_EXEC_NOCACHE      (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_WC                (__PAGE_KERNEL | _PAGE_CACHE_WC)
#define __PAGE_KERNEL_NOCACHE           (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_UC_MINUS          (__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_VSYSCALL          (__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VVAR              (__PAGE_KERNEL_RO | _PAGE_USER)
#define __PAGE_KERNEL_VVAR_NOCACHE      (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_LARGE             (__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_NOCACHE     (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC        (__PAGE_KERNEL_EXEC | _PAGE_PSE)

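/*
 * With the Xen-specific _PAGE_IOMAP flag removed, the IO variants are
 * plain aliases of the corresponding kernel protections, kept so that
 * existing callers continue to build unchanged.
 */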
#define __PAGE_KERNEL_IO                (__PAGE_KERNEL)
#define __PAGE_KERNEL_IO_NOCACHE        (__PAGE_KERNEL_NOCACHE)
#define __PAGE_KERNEL_IO_UC_MINUS       (__PAGE_KERNEL_UC_MINUS)
#define __PAGE_KERNEL_IO_WC             (__PAGE_KERNEL_WC)

#define PAGE_KERNEL                     __pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO                  __pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC                __pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX                  __pgprot(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_WC                  __pgprot(__PAGE_KERNEL_WC)
#define PAGE_KERNEL_NOCACHE             __pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_UC_MINUS            __pgprot(__PAGE_KERNEL_UC_MINUS)
#define PAGE_KERNEL_EXEC_NOCACHE        __pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
#define PAGE_KERNEL_LARGE               __pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_NOCACHE       __pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
#define PAGE_KERNEL_LARGE_EXEC          __pgprot(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL            __pgprot(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VVAR                __pgprot(__PAGE_KERNEL_VVAR)
#define PAGE_KERNEL_VVAR_NOCACHE        __pgprot(__PAGE_KERNEL_VVAR_NOCACHE)

#define PAGE_KERNEL_IO                  __pgprot(__PAGE_KERNEL_IO)
#define PAGE_KERNEL_IO_NOCACHE          __pgprot(__PAGE_KERNEL_IO_NOCACHE)
#define PAGE_KERNEL_IO_UC_MINUS         __pgprot(__PAGE_KERNEL_IO_UC_MINUS)
#define PAGE_KERNEL_IO_WC               __pgprot(__PAGE_KERNEL_IO_WC)

/* xwr */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY_EXEC
#define __P101  PAGE_READONLY_EXEC
#define __P110  PAGE_COPY_EXEC
#define __P111  PAGE_COPY_EXEC

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY_EXEC
#define __S101  PAGE_READONLY_EXEC
#define __S110  PAGE_SHARED_EXEC
#define __S111  PAGE_SHARED_EXEC

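/*
 * These populate protection_map[] in mm/mmap.c; the index bits are xwr
 * (VM_EXEC, VM_WRITE, VM_READ). For example, a private
 * PROT_READ|PROT_WRITE mapping is installed as __P011 == PAGE_COPY,
 * which is read-only so that the first write faults and triggers
 * copy-on-write.
 */
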
/*
 * early identity mapping pte attrib macros.
 */
#ifdef CONFIG_X86_64
#define __PAGE_KERNEL_IDENT_LARGE_EXEC  __PAGE_KERNEL_LARGE_EXEC
#else
#define PTE_IDENT_ATTR  0x003           /* PRESENT+RW */
#define PDE_IDENT_ATTR  0x063           /* PRESENT+RW+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR  0x001           /* PRESENT (no other attributes) */
#endif

#ifdef CONFIG_X86_32
# include <asm/pgtable_32_types.h>
#else
# include <asm/pgtable_64_types.h>
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>

/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
#define PTE_PFN_MASK            ((pteval_t)PHYSICAL_PAGE_MASK)

/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
#define PTE_FLAGS_MASK          (~PTE_PFN_MASK)

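/*
 * Illustrative decomposition of a pte value using these masks:
 *
 *      phys_addr_t   phys  = pte_val(pte) & PTE_PFN_MASK;
 *      pteval_t      flags = pte_val(pte) & PTE_FLAGS_MASK;
 *      unsigned long pfn   = phys >> PAGE_SHIFT;
 */
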
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;

typedef struct { pgdval_t pgd; } pgd_t;

static inline pgd_t native_make_pgd(pgdval_t val)
{
        return (pgd_t) { val };
}

static inline pgdval_t native_pgd_val(pgd_t pgd)
{
        return pgd.pgd;
}

static inline pgdval_t pgd_flags(pgd_t pgd)
{
        return native_pgd_val(pgd) & PTE_FLAGS_MASK;
}

#if PAGETABLE_LEVELS > 3
typedef struct { pudval_t pud; } pud_t;

static inline pud_t native_make_pud(pudval_t val)
{
        return (pud_t) { val };
}

static inline pudval_t native_pud_val(pud_t pud)
{
        return pud.pud;
}
#else
#include <asm-generic/pgtable-nopud.h>

static inline pudval_t native_pud_val(pud_t pud)
{
        return native_pgd_val(pud.pgd);
}
#endif

#if PAGETABLE_LEVELS > 2
typedef struct { pmdval_t pmd; } pmd_t;

static inline pmd_t native_make_pmd(pmdval_t val)
{
        return (pmd_t) { val };
}

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
        return pmd.pmd;
}
#else
#include <asm-generic/pgtable-nopmd.h>

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
        return native_pgd_val(pmd.pud.pgd);
}
#endif

static inline pudval_t pud_flags(pud_t pud)
{
        return native_pud_val(pud) & PTE_FLAGS_MASK;
}

static inline pmdval_t pmd_flags(pmd_t pmd)
{
        return native_pmd_val(pmd) & PTE_FLAGS_MASK;
}

static inline pte_t native_make_pte(pteval_t val)
{
        return (pte_t) { .pte = val };
}

static inline pteval_t native_pte_val(pte_t pte)
{
        return pte.pte;
}

static inline pteval_t pte_flags(pte_t pte)
{
        return native_pte_val(pte) & PTE_FLAGS_MASK;
}

#define pgprot_val(x)   ((x).pgprot)
#define __pgprot(x)     ((pgprot_t) { (x) } )

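/*
 * Illustrative round-trip (assumes a valid pfn variable; this is
 * roughly what pfn_pte() expands to in the native case): compose a pte
 * from a pfn and a protection, then recover the protection bits with
 * the helpers above:
 *
 *      pte_t pte = native_make_pte(((pteval_t)pfn << PAGE_SHIFT) |
 *                                  pgprot_val(PAGE_KERNEL));
 *      pteval_t flags = pte_flags(pte);
 */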

typedef struct page *pgtable_t;

extern pteval_t __supported_pte_mask;
extern void set_nx(void);
extern int nx_enabled;

#define pgprot_writecombine     pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);

/* Indicate that x86 has its own track and untrack pfn vma functions */
#define __HAVE_PFNMAP_TRACKING

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t *vma_prot);

/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);

#ifdef CONFIG_X86_32
extern void native_pagetable_init(void);
#else
#define native_pagetable_init        paging_init
#endif

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

enum pg_level {
        PG_LEVEL_NONE,
        PG_LEVEL_4K,
        PG_LEVEL_2M,
        PG_LEVEL_1G,
        PG_LEVEL_NUM
};

#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry is
 * present. NOTE: the return type is pte_t, but if the mapping is a
 * huge (PSE) pmd, the pmd entry is returned as if it were a pte.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
                                    unsigned int *level);
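
/*
 * Illustrative use (addr is some kernel virtual address): checking
 * whether an address is backed by a huge page:
 *
 *      unsigned int level;
 *      pte_t *pte = lookup_address(addr, &level);
 *      if (pte && level == PG_LEVEL_2M)
 *              ...      mapped by a 2MB PSE pmd
 */
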
extern phys_addr_t slow_virt_to_phys(void *__address);
extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
                                   unsigned numpages, unsigned long page_flags);
void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address,
                               unsigned numpages);
#endif  /* !__ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_DEFS_H */