/*
 * include/asm-x86/page.h
 *
 * x86: page.h -- move and unify types for pagetable entries.
 */
#ifndef _ASM_X86_PAGE_H
#define _ASM_X86_PAGE_H

#include <linux/const.h>

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))

/* Pagetable-entry bits that hold the physical frame address
 * (page-aligned bits restricted to the valid physical-address width). */
#define PHYSICAL_PAGE_MASK (PAGE_MASK & __PHYSICAL_MASK)
#define PTE_MASK PHYSICAL_PAGE_MASK

/* Large (PMD-level) page geometry; PMD_SHIFT comes from the
 * per-configuration pagetable headers. */
#define LARGE_PAGE_SIZE (_AC(1,UL) << PMD_SHIFT)
#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))

/* Hugetlb pages are PMD-sized on x86. */
#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)

/* Masks of the valid physical/virtual address bits; the *_SHIFT values
 * are defined per-configuration further down in this file. */
#define __PHYSICAL_MASK _AT(phys_addr_t, (_AC(1,ULL) << __PHYSICAL_MASK_SHIFT) - 1)
#define __VIRTUAL_MASK ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - 1)
27
3da1bcc2
IM
28#ifndef __ASSEMBLY__
29#include <linux/types.h>
30#endif
83a5101b
JF
31
#ifdef CONFIG_X86_64
#define PAGETABLE_LEVELS 4

/* Kernel stacks are 2^THREAD_ORDER pages. */
#define THREAD_ORDER 1
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define CURRENT_MASK (~(THREAD_SIZE-1))

#define EXCEPTION_STACK_ORDER 0
#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)

#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)

#define IRQSTACK_ORDER 2
#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)

/* Indices of the per-exception stacks (1-based; 0 means "no
 * dedicated stack"). */
#define STACKFAULT_STACK 1
#define DOUBLEFAULT_STACK 2
#define NMI_STACK 3
#define DEBUG_STACK 4
#define MCE_STACK 5
#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */

/* Base of the kernel's virtual mapping of physical memory. */
#define __PAGE_OFFSET _AC(0xffff810000000000, UL)

#define __PHYSICAL_START CONFIG_PHYSICAL_START
#define __KERNEL_ALIGN 0x200000

/*
 * Make sure kernel is aligned to 2MB address. Catching it at compile
 * time is better. Change your config file and compile the kernel
 * for a 2MB aligned address (CONFIG_PHYSICAL_START)
 */
#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
#error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
#endif

/* Virtual address the kernel image is mapped/linked at. */
#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
#define __START_KERNEL_map _AC(0xffffffff80000000, UL)

/* See Documentation/x86_64/mm.txt for a description of the memory map. */
#define __PHYSICAL_MASK_SHIFT 46
#define __VIRTUAL_MASK_SHIFT 48

#define KERNEL_TEXT_SIZE (40*1024*1024)
#define KERNEL_TEXT_START _AC(0xffffffff80000000, UL)
345b904c
JF
#ifndef __ASSEMBLY__
void clear_page(void *page);
void copy_page(void *to, void *from);

/*
 * These are used to make use of C type-checking..
 *
 * On 64-bit every pagetable level's entry fits in one machine word.
 */
typedef unsigned long pteval_t;
typedef unsigned long pmdval_t;
typedef unsigned long pudval_t;
typedef unsigned long pgdval_t;
typedef unsigned long pgprotval_t;
typedef unsigned long phys_addr_t;

typedef struct { pteval_t pte; } pte_t;

/*
 * Raw (non-paravirt) pte accessors.  Implemented as inline functions
 * rather than macros so the argument types are checked, matching the
 * 32-bit variants later in this file.
 */
static inline pteval_t native_pte_val(pte_t pte)
{
	return pte.pte;
}

static inline pte_t native_make_pte(pteval_t val)
{
	return (pte_t) { val };
}

#endif /* !__ASSEMBLY__ */
99
83a5101b
JF
100#endif /* CONFIG_X86_64 */
101
102#ifdef CONFIG_X86_32
103
104/*
105 * This handles the memory map.
106 *
107 * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
108 * a virtual address space of one gigabyte, which limits the
109 * amount of physical memory you can use to about 950MB.
110 *
111 * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
112 * and CONFIG_HIGHMEM64G options in the kernel configuration.
113 */
114#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
115
#ifdef CONFIG_X86_PAE
/* PAE: 64-bit pagetable entries, 36-bit physical addresses, 3 levels. */
#define __PHYSICAL_MASK_SHIFT 36
#define __VIRTUAL_MASK_SHIFT 32
#define PAGETABLE_LEVELS 3
120
121#ifndef __ASSEMBLY__
122typedef u64 pteval_t;
123typedef u64 pmdval_t;
124typedef u64 pudval_t;
125typedef u64 pgdval_t;
126typedef u64 pgprotval_t;
127typedef u64 phys_addr_t;
128
129typedef struct { unsigned long pte_low, pte_high; } pte_t;
130
131static inline unsigned long long native_pte_val(pte_t pte)
132{
133 return pte.pte_low | ((unsigned long long)pte.pte_high << 32);
134}
135
136static inline pte_t native_make_pte(unsigned long long val)
137{
138 return (pte_t) { .pte_low = val, .pte_high = (val >> 32) } ;
139}
140
141#endif /* __ASSEMBLY__
142 */
83a5101b
JF
#else /* !CONFIG_X86_PAE */
/* Non-PAE: 32-bit pagetable entries, 32-bit physical addresses, 2 levels. */
#define __PHYSICAL_MASK_SHIFT 32
#define __VIRTUAL_MASK_SHIFT 32
#define PAGETABLE_LEVELS 2
147
#ifndef __ASSEMBLY__
/* Without PAE a pagetable entry fits in one 32-bit word. */
typedef unsigned long pteval_t;
typedef unsigned long pmdval_t;
typedef unsigned long pudval_t;
typedef unsigned long pgdval_t;
typedef unsigned long pgprotval_t;
typedef unsigned long phys_addr_t;

typedef struct { pteval_t pte_low; } pte_t;
typedef pte_t boot_pte_t;

/*
 * Raw pte accessors.  Spelled with pteval_t (not bare unsigned long)
 * so the prototypes stay in step with the PAE variants above.
 */
static inline pteval_t native_pte_val(pte_t pte)
{
	return pte.pte_low;
}

static inline pte_t native_make_pte(pteval_t val)
{
	return (pte_t) { .pte_low = val };
}

#endif /* !__ASSEMBLY__ */
83a5101b
JF
#endif /* CONFIG_X86_PAE */

/* Advertise an arch-specific hugetlb_get_unmapped_area(). */
#ifdef CONFIG_HUGETLB_PAGE
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif
175
345b904c
JF
#ifndef __ASSEMBLY__
#ifdef CONFIG_X86_USE_3DNOW
#include <asm/mmx.h>

/* MMX/3DNow!-optimized page primitives. */
static inline void clear_page(void *page)
{
	mmx_clear_page(page);
}

static inline void copy_page(void *to, void *from)
{
	mmx_copy_page(to, from);
}
#else /* !CONFIG_X86_USE_3DNOW */
#include <linux/string.h>

/* Generic fallback: plain memset/memcpy of one page. */
static inline void clear_page(void *page)
{
	memset(page, 0, PAGE_SIZE);
}

static inline void copy_page(void *to, void *from)
{
	memcpy(to, from, PAGE_SIZE);
}
#endif /* CONFIG_X86_USE_3DNOW */
#endif /* !__ASSEMBLY__ */
203
83a5101b
JF
204#endif /* CONFIG_X86_32 */
205
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)

/* Default vma protections; READ_IMPLIES_EXEC personalities also get
 * VM_EXEC on their data mappings. */
#define VM_DATA_DEFAULT_FLAGS \
	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
212
345b904c
JF
213#ifndef __ASSEMBLY__
214struct page;
215
216static void inline clear_user_page(void *page, unsigned long vaddr,
217 struct page *pg)
218{
219 clear_page(page);
220}
221
222static void inline copy_user_page(void *to, void *from, unsigned long vaddr,
223 struct page *topage)
224{
225 copy_page(to, from);
226}
227
228#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
229 alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
230#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
231
38f0f127
IM
232typedef struct { pgdval_t pgd; } pgd_t;
233typedef struct { pgprotval_t pgprot; } pgprot_t;
234
235static inline pgd_t native_make_pgd(pgdval_t val)
236{
237 return (pgd_t) { val };
238}
239
240static inline pgdval_t native_pgd_val(pgd_t pgd)
241{
242 return pgd.pgd;
243}
244
245#if PAGETABLE_LEVELS >= 3
246#if PAGETABLE_LEVELS == 4
247typedef struct { pudval_t pud; } pud_t;
248
249static inline pud_t native_make_pud(pmdval_t val)
250{
251 return (pud_t) { val };
252}
253
254static inline pudval_t native_pud_val(pud_t pud)
255{
256 return pud.pud;
257}
258#else /* PAGETABLE_LEVELS == 3 */
259#include <asm-generic/pgtable-nopud.h>
260#endif /* PAGETABLE_LEVELS == 4 */
261
262typedef struct { pmdval_t pmd; } pmd_t;
263
264static inline pmd_t native_make_pmd(pmdval_t val)
265{
266 return (pmd_t) { val };
267}
268
269static inline pmdval_t native_pmd_val(pmd_t pmd)
270{
271 return pmd.pmd;
272}
273#else /* PAGETABLE_LEVELS == 2 */
274#include <asm-generic/pgtable-nopmd.h>
275#endif /* PAGETABLE_LEVELS >= 3 */
276
#define pgprot_val(x) ((x).pgprot)
#define __pgprot(x) ((pgprot_t) { (x) } )

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else /* !CONFIG_PARAVIRT */

/* Without paravirt, the generic accessors are just the native ones. */
#define pgd_val(x) native_pgd_val(x)
#define __pgd(x) native_make_pgd(x)

/* Folded levels get their accessors from the asm-generic
 * pgtable-nopud/nopmd headers included above instead. */
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x) native_pud_val(x)
#define __pud(x) native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x) native_pmd_val(x)
#define __pmd(x) native_make_pmd(x)
#endif

#define pte_val(x) native_pte_val(x)
#define __pte(x) native_make_pte(x)

#endif /* CONFIG_PARAVIRT */
301
345b904c
JF
302#endif /* __ASSEMBLY__ */
303
304
83a5101b
JF
305#ifdef CONFIG_X86_32
306# include "page_32.h"
96a388de 307#else
83a5101b 308# include "page_64.h"
96a388de 309#endif
83a5101b
JF
310
311#endif /* _ASM_X86_PAGE_H */
This page took 0.083922 seconds and 5 git commands to generate.