/*
 * arch/arm/include/asm/pgtable.h
 *
 * Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/const.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include <asm-generic/4level-fixup.h>
#include <asm/pgtable-nommu.h>

#else

#include <asm-generic/pgtable-nopud.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET		(8*1024*1024)
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END		0xff000000UL
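
/*
 * A quick worked example of the VMALLOC_START arithmetic above (the
 * high_memory value here is illustrative only, not a real platform
 * value): with high_memory at 0xc0000000,
 *
 *	(0xc0000000 + 0x00800000) & ~0x007fffff == 0xc0800000
 *
 * so vmalloc space starts 8MB above the direct-mapped RAM, and any
 * stray access into that hole faults.  When high_memory is not 8MB
 * aligned, the expression rounds VMALLOC_START up to the next 8MB
 * boundary instead.
 */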

#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, pte_t);
extern void __pmd_error(const char *file, int line, pmd_t);
extern void __pgd_error(const char *file, int line, pgd_t);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)

/*
 * This is the lowest virtual address we can permit any user space
 * mapping to be mapped at.  This is particularly important for
 * non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS	PAGE_SIZE

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable and bufferable bits based on memory policy,
 * as well as any architecture dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG

extern pgprot_t		pgprot_user;
extern pgprot_t		pgprot_kernel;

#define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))

#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE)
#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC	pgprot_kernel

#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)

#define __pgprot_modify(prot,mask,bits)		\
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_noncached(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)

#define pgprot_stronglyordered(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
#endif
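
/*
 * For illustration, pgprot_noncached() above expands to:
 *
 *	__pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_UNCACHED)
 *
 * i.e. the memory-type field of the PTE is cleared and replaced with
 * the uncached type, while the permission and accessed/dirty bits in
 * prot are left untouched.
 */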

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC
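
/*
 * A note on the encoding above, based on how mm/mmap.c indexes
 * protection_map[]: the three digits are, left to right, the exec,
 * write and read bits of a mapping.  For example, a private
 * PROT_READ|PROT_WRITE mapping selects __P011 == __PAGE_COPY (writes
 * fault and trigger copy-on-write), while the same protection on a
 * MAP_SHARED mapping selects __S011 == __PAGE_SHARED, which is
 * directly writable.
 */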

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

#ifndef CONFIG_HIGHPTE
#define __pte_map(pmd)		pmd_page_vaddr(*(pmd))
#define __pte_unmap(pte)	do { } while (0)
#else
#define __pte_map(pmd)		(pte_t *)kmap_atomic(pmd_page(*(pmd)))
#define __pte_unmap(pte)	kunmap_atomic(pte)
#endif

#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(pmd,addr)	(pmd_page_vaddr(*(pmd)) + pte_index(addr))

#define pte_offset_map(pmd,addr)	(__pte_map(pmd) + pte_index(addr))
#define pte_unmap(pte)			__pte_unmap(pte)
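
/*
 * A minimal sketch of a software page-table walk using the accessors
 * above (error handling omitted; pud_offset() and pmd_offset() come
 * from the nopud and 2/3-level headers included earlier):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * For user page tables, pte_offset_map()/pte_unmap() would be used on
 * the last step instead, so that CONFIG_HIGHPTE tables are temporarily
 * mapped with kmap_atomic().
 */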

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))

#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)

#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)

#define pte_none(pte)		(!pte_val(pte))
#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
#define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY))
#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
#define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
#define pte_special(pte)	(0)

#define pte_present_user(pte)	(pte_present(pte) && (pte_val(pte) & L_PTE_USER))

#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif

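/*
 * Install a PTE.  User-space mappings (anything below TASK_SIZE that
 * is present and has L_PTE_USER set) get their I-cache/D-cache
 * coherency maintained and are marked non-global (PTE_EXT_NG), so the
 * TLB tags them with the owning process's ASID; kernel mappings are
 * installed global and unmodified.
 */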
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	unsigned long ext = 0;

	if (addr < TASK_SIZE && pte_present_user(pteval)) {
		__sync_icache_dcache(pteval);
		ext |= PTE_EXT_NG;
	}

	set_pte_ext(ptep, pteval, ext);
}

#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY);
PTE_BIT_FUNC(mkwrite,   &= ~L_PTE_RDONLY);
PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);
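
/*
 * For reference, the last invocation above expands to:
 *
 *	static inline pte_t pte_mkyoung(pte_t pte)
 *	{
 *		pte_val(pte) |= L_PTE_YOUNG;
 *		return pte;
 *	}
 *
 * These helpers transform the pte value only; the caller still has to
 * write the result back with set_pte_at() for it to take effect.
 */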

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
247
fb93a1c7
RK
248/*
249 * Encode and decode a swap entry. Swap entries are stored in the Linux
250 * page tables as follows:
251 *
252 * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
253 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
f5f2025e 254 * <--------------- offset ----------------------> < type -> 0 0 0
1da177e4 255 *
f5f2025e 256 * This gives us up to 31 swap files and 64GB per swap file. Note that
fb93a1c7 257 * the offset field is always non-zero.
1da177e4 258 */
6a00cded 259#define __SWP_TYPE_SHIFT 3
f5f2025e 260#define __SWP_TYPE_BITS 5
fb93a1c7
RK
261#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
262#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
263
264#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
265#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
266#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
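
/*
 * Worked example (values are illustrative): __swp_entry(3, 0x1234)
 * yields
 *
 *	(3 << 3) | (0x1234 << 8) == 0x123418
 *
 * and __swp_type()/__swp_offset() recover 3 and 0x1234 by shifting and
 * masking the same fields back out.  Bits 2..0 stay zero, so a swap
 * entry can never be mistaken for a present pte.
 */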

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/*
 * Encode and decode a file entry.  File entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <----------------------- offset ------------------------> 1 0 0
 */
#define pte_file(pte)		(pte_val(pte) & L_PTE_FILE)
#define pte_to_pgoff(x)		(pte_val(x) >> 3)
#define pgoff_to_pte(x)		__pte(((x) << 3) | L_PTE_FILE)

#define PTE_FILE_MAX_BITS	29

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
	remap_pfn_range(vma, from, pfn, size, prot)

#define pgtable_cache_init() do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */