arch/x86/include/asm/pgtable_types.h
#ifndef _ASM_X86_PGTABLE_DEFS_H
#define _ASM_X86_PGTABLE_DEFS_H

#include <linux/const.h>
#include <asm/page_types.h>

#define FIRST_USER_ADDRESS	0

#define _PAGE_BIT_PRESENT	0	/* is present */
#define _PAGE_BIT_RW		1	/* writeable */
#define _PAGE_BIT_USER		2	/* userspace addressable */
#define _PAGE_BIT_PWT		3	/* page write through */
#define _PAGE_BIT_PCD		4	/* page cache disabled */
#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
#define _PAGE_BIT_PSE		7	/* 4MB (or 2MB) page */
#define _PAGE_BIT_PAT		7	/* on 4KB pages */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
#define _PAGE_BIT_IOMAP		10	/* flag used to indicate IO mapping */
#define _PAGE_BIT_HIDDEN	11	/* hidden by kmemcheck */
#define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
#define _PAGE_BIT_SPECIAL	_PAGE_BIT_UNUSED1
#define _PAGE_BIT_CPA_TEST	_PAGE_BIT_UNUSED1
#define _PAGE_BIT_SPLITTING	_PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */

/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present() gives true */
#define _PAGE_BIT_PROTNONE	_PAGE_BIT_GLOBAL
/* - set: nonlinear file mapping, saved PTE; unset: swap */
#define _PAGE_BIT_FILE		_PAGE_BIT_DIRTY

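/*
 * Illustrative sketch (editor's addition, not part of the original
 * header): with _PAGE_BIT_PRESENT clear, the remaining bits re-encode
 * the entry as described above. Hypothetical tests on a raw value:
 *
 *	!(val & (1ULL << _PAGE_BIT_PRESENT)) &&
 *	 (val & (1ULL << _PAGE_BIT_FILE))	-> saved nonlinear file pte
 *	!(val & (1ULL << _PAGE_BIT_PRESENT)) &&
 *	 (val & (1ULL << _PAGE_BIT_PROTNONE))	-> PROT_NONE mapping
 */
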
#define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER	(_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT	(_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD	(_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_UNUSED1	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
#define _PAGE_IOMAP	(_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
#define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE	(_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST	(_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#define _PAGE_SPLITTING	(_AT(pteval_t, 1) << _PAGE_BIT_SPLITTING)
#define __HAVE_ARCH_PTE_SPECIAL

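/*
 * Illustrative sketch (editor's addition, not part of the original
 * header): each _PAGE_* value above is a single-bit pteval_t mask, so
 * protections compose with bitwise OR and are queried with bitwise
 * AND. The macro below is hypothetical and mirrors how _PAGE_TABLE is
 * built further down:
 *
 *	#define _PAGE_EXAMPLE_USER_RW \
 *		(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER)
 *
 * A writability test on a raw value is then (val & _PAGE_RW) != 0.
 */
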
#ifdef CONFIG_KMEMCHECK
#define _PAGE_HIDDEN	(_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
#else
#define _PAGE_HIDDEN	(_AT(pteval_t, 0))
#endif

/*
 * The same hidden bit is used by kmemcheck, but since kmemcheck works
 * on kernel pages and the soft-dirty engine works on user-space pages,
 * the two users do not conflict with each other.
 */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
#else
#define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 0))
#endif

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
#define _PAGE_NX	(_AT(pteval_t, 0))
#endif

#define _PAGE_FILE	(_AT(pteval_t, 1) << _PAGE_BIT_FILE)
#define _PAGE_PROTNONE	(_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)

/*
 * _PAGE_NUMA indicates that this page will trigger a NUMA hinting
 * minor page fault to gather NUMA placement statistics (see
 * pte_numa()). The bit picked (8) is within the range between
 * _PAGE_FILE (6) and _PAGE_PROTNONE (8) bits. Therefore, it doesn't
 * require changes to the swp entry format because that bit is always
 * zero when the pte is not present.
 *
 * The bit picked must read as zero both when the pmd is present and
 * when it is not, so that we don't lose information when we set it
 * while atomically clearing the present bit.
 *
 * Because we share the same bit (8) with _PAGE_PROTNONE, this can be
 * interpreted as _PAGE_NUMA only in places that _PAGE_PROTNONE
 * couldn't reach, such as handle_mm_fault() (see access_error() in
 * arch/x86/mm/fault.c: the vma protection must not be PROT_NONE for
 * handle_mm_fault() to be invoked).
 */
#define _PAGE_NUMA	_PAGE_PROTNONE

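/*
 * Illustrative sketch (editor's addition): per the comment above, a
 * pte_numa()-style check tells _PAGE_NUMA apart from an ordinary
 * present pte carrying the global bit by also requiring the present
 * bit to be clear:
 *
 *	(flags & (_PAGE_NUMA | _PAGE_PRESENT)) == _PAGE_NUMA
 */
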
#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
			 _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
			 _PAGE_DIRTY)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)

#define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
#define _PAGE_CACHE_WB		(0)
#define _PAGE_CACHE_WC		(_PAGE_PWT)
#define _PAGE_CACHE_UC_MINUS	(_PAGE_PCD)
#define _PAGE_CACHE_UC		(_PAGE_PCD | _PAGE_PWT)

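/*
 * Editor's note: the PCD and PWT bits together select the effective
 * memory type, steered by the PAT MSR (assuming the default layout
 * Linux programs into it):
 *
 *	PCD=0 PWT=0 -> WB	write-back (the default)
 *	PCD=0 PWT=1 -> WC	write-combining
 *	PCD=1 PWT=0 -> UC-	uncached, may be upgraded to WC
 *	PCD=1 PWT=1 -> UC	strongly uncached
 */
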
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
				 _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW |	\
					 _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)

#define __PAGE_KERNEL_EXEC						\
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
#define __PAGE_KERNEL		(__PAGE_KERNEL_EXEC | _PAGE_NX)

#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_EXEC_NOCACHE	(__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_WC		(__PAGE_KERNEL | _PAGE_CACHE_WC)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_UC_MINUS		(__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VVAR		(__PAGE_KERNEL_RO | _PAGE_USER)
#define __PAGE_KERNEL_VVAR_NOCACHE	(__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_NOCACHE	(__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define __PAGE_KERNEL_IO		(__PAGE_KERNEL | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_NOCACHE	(__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_UC_MINUS	(__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_WC		(__PAGE_KERNEL_WC | _PAGE_IOMAP)

#define PAGE_KERNEL			__pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO			__pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC		__pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX			__pgprot(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_WC			__pgprot(__PAGE_KERNEL_WC)
#define PAGE_KERNEL_NOCACHE		__pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_UC_MINUS		__pgprot(__PAGE_KERNEL_UC_MINUS)
#define PAGE_KERNEL_EXEC_NOCACHE	__pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
#define PAGE_KERNEL_LARGE		__pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_NOCACHE	__pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
#define PAGE_KERNEL_LARGE_EXEC		__pgprot(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL		__pgprot(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VVAR		__pgprot(__PAGE_KERNEL_VVAR)
#define PAGE_KERNEL_VVAR_NOCACHE	__pgprot(__PAGE_KERNEL_VVAR_NOCACHE)

#define PAGE_KERNEL_IO			__pgprot(__PAGE_KERNEL_IO)
#define PAGE_KERNEL_IO_NOCACHE		__pgprot(__PAGE_KERNEL_IO_NOCACHE)
#define PAGE_KERNEL_IO_UC_MINUS		__pgprot(__PAGE_KERNEL_IO_UC_MINUS)
#define PAGE_KERNEL_IO_WC		__pgprot(__PAGE_KERNEL_IO_WC)

/*         xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC

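/*
 * Illustrative sketch (editor's addition): the __P/__S tables above
 * are indexed by the xwr bits of a vma's access flags; __P* covers
 * private (copy-on-write) mappings and __S* shared ones. For example,
 * a PROT_READ|PROT_WRITE private mapping selects __P011 == PAGE_COPY,
 * so the hardware pte stays read-only until the write fault makes the
 * private copy.
 */
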
/*
 * Early identity mapping pte attrib macros.
 */
#ifdef CONFIG_X86_64
#define __PAGE_KERNEL_IDENT_LARGE_EXEC	__PAGE_KERNEL_LARGE_EXEC
#else
/*
 * PDE_IDENT_ATTR includes the USER bit. Because the PDE and PTE
 * protection bits are combined, this allows user space to access the
 * high-address-mapped VDSO in the presence of CONFIG_COMPAT_VDSO.
 */
#define PTE_IDENT_ATTR	 0x003		/* PRESENT+RW */
#define PDE_IDENT_ATTR	 0x067		/* PRESENT+RW+USER+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR	 0x001		/* PRESENT (no other attributes) */
#endif

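/*
 * Worked decode (editor's note): 0x067 == _PAGE_PRESENT (0x001) |
 * _PAGE_RW (0x002) | _PAGE_USER (0x004) | _PAGE_ACCESSED (0x020) |
 * _PAGE_DIRTY (0x040); 0x003 keeps only PRESENT+RW.
 */
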
#ifdef CONFIG_X86_32
# include <asm/pgtable_32_types.h>
#else
# include <asm/pgtable_64_types.h>
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>

/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
#define PTE_PFN_MASK		((pteval_t)PHYSICAL_PAGE_MASK)

/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
#define PTE_FLAGS_MASK		(~PTE_PFN_MASK)

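/*
 * Illustrative sketch (editor's addition, not part of the original
 * header): the two masks split a raw entry into its page frame number
 * and its attribute bits. The helper name below is hypothetical.
 */
static inline unsigned long pteval_to_pfn_example(pteval_t val)
{
	/* Mask off the flag bits, then shift the physical address down. */
	return (unsigned long)((val & PTE_PFN_MASK) >> PAGE_SHIFT);
}
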
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;

typedef struct { pgdval_t pgd; } pgd_t;

static inline pgd_t native_make_pgd(pgdval_t val)
{
	return (pgd_t) { val };
}

static inline pgdval_t native_pgd_val(pgd_t pgd)
{
	return pgd.pgd;
}

static inline pgdval_t pgd_flags(pgd_t pgd)
{
	return native_pgd_val(pgd) & PTE_FLAGS_MASK;
}

#if PAGETABLE_LEVELS > 3
typedef struct { pudval_t pud; } pud_t;

static inline pud_t native_make_pud(pudval_t val)
{
	return (pud_t) { val };
}

static inline pudval_t native_pud_val(pud_t pud)
{
	return pud.pud;
}
#else
#include <asm-generic/pgtable-nopud.h>

static inline pudval_t native_pud_val(pud_t pud)
{
	return native_pgd_val(pud.pgd);
}
#endif

#if PAGETABLE_LEVELS > 2
typedef struct { pmdval_t pmd; } pmd_t;

static inline pmd_t native_make_pmd(pmdval_t val)
{
	return (pmd_t) { val };
}

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return pmd.pmd;
}
#else
#include <asm-generic/pgtable-nopmd.h>

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return native_pgd_val(pmd.pud.pgd);
}
#endif

static inline pudval_t pud_flags(pud_t pud)
{
	return native_pud_val(pud) & PTE_FLAGS_MASK;
}

static inline pmdval_t pmd_flags(pmd_t pmd)
{
	return native_pmd_val(pmd) & PTE_FLAGS_MASK;
}

static inline pte_t native_make_pte(pteval_t val)
{
	return (pte_t) { .pte = val };
}

static inline pteval_t native_pte_val(pte_t pte)
{
	return pte.pte;
}

static inline pteval_t pte_flags(pte_t pte)
{
	return native_pte_val(pte) & PTE_FLAGS_MASK;
}

#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) } )

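/*
 * Illustrative sketch (editor's addition): pgprot_val()/__pgprot()
 * convert between the wrapped type and raw bits, e.g. to derive a
 * non-executable variant of an existing protection:
 *
 *	pgprot_t prot = __pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_NX);
 */
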
typedef struct page *pgtable_t;

extern pteval_t __supported_pte_mask;
extern void set_nx(void);
extern int nx_enabled;

#define pgprot_writecombine	pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);

/* Indicate that x86 has its own track and untrack pfn vma functions */
#define __HAVE_PFNMAP_TRACKING

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t *vma_prot);

/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);

#ifdef CONFIG_X86_32
extern void native_pagetable_init(void);
#else
#define native_pagetable_init        paging_init
#endif

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

enum pg_level {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_NUM
};

#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry is
 * present. NOTE: the return type is pte_t, but if the pmd maps a large
 * (PSE) page, the pmd entry is returned as a pte as well.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
extern phys_addr_t slow_virt_to_phys(void *__address);

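/*
 * Illustrative usage sketch (editor's addition, not part of the
 * original header): probing the mapping level of a kernel virtual
 * address with lookup_address(). The helper name is hypothetical.
 */
static inline bool kva_mapped_as_2m_example(unsigned long address)
{
	unsigned int level;
	pte_t *pte = lookup_address(address, &level);

	/* A PSE pmd comes back cast as a pte, per the NOTE above. */
	return pte && (pte_flags(*pte) & _PAGE_PRESENT) &&
	       level == PG_LEVEL_2M;
}
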
#endif	/* !__ASSEMBLY__ */

#endif	/* _ASM_X86_PGTABLE_DEFS_H */