/*
 * arch/arm/include/asm/pgtable.h
 *
 * Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/const.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include <asm-generic/4level-fixup.h>
#include <asm/pgtable-nommu.h>

#else

#include <asm-generic/pgtable-nopud.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

#include <asm/tlbflush.h>

#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET		(8*1024*1024)
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END		0xff800000UL
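
/*
 * Worked example (illustrative value, not from this header): with
 * high_memory at 0xc8000000, VMALLOC_START is
 * (0xc8000000 + 0x00800000) & ~0x007fffff = 0xc8800000, i.e. the first
 * 8MB boundary at or above the end of lowmem plus the 8MB hole.
 */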

#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, pte_t);
extern void __pmd_error(const char *file, int line, pmd_t);
extern void __pgd_error(const char *file, int line, pgd_t);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)

/*
 * This is the lowest virtual address we can permit any user space
 * mapping to be mapped at.  This is particularly important for
 * non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS	(PAGE_SIZE * 2)

/*
 * Use TASK_SIZE as the ceiling argument for free_pgtables() and
 * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled
 * (pmd page shared between user and kernel).
 */
#ifdef CONFIG_ARM_LPAE
#define USER_PGTABLES_CEILING	TASK_SIZE
#endif

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable and bufferable bits based on memory policy,
 * as well as any architecture dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG

extern pgprot_t		pgprot_user;
extern pgprot_t		pgprot_kernel;
extern pgprot_t		pgprot_hyp_device;
extern pgprot_t		pgprot_s2;
extern pgprot_t		pgprot_s2_device;

#define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))

#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE)
#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC	pgprot_kernel
#define PAGE_HYP		_MOD_PROT(pgprot_kernel, L_PTE_HYP)
#define PAGE_HYP_DEVICE		_MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
#define PAGE_S2			_MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
#define PAGE_S2_DEVICE		_MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY)

#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)

#define __pgprot_modify(prot,mask,bits)		\
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_noncached(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)

#define pgprot_stronglyordered(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

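/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * header): a driver remapping a framebuffer as write-combining might do
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 *
 * Only the memory-type field (L_PTE_MT_MASK) is rewritten; the access
 * permission bits of the original pgprot are preserved.
 */
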
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
#endif

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
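/*
 * The three digits in __P{xwr}/__S{xwr} index the VM_EXEC, VM_WRITE and
 * VM_READ flags in that order; the __P* entries cover private
 * (copy-on-write) mappings and the __S* entries shared ones.
 */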
#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

#define pmd_none(pmd)		(!pmd_val(pmd))

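/*
 * Note: the (s32)PAGE_MASK cast sign-extends PAGE_MASK when pmd_val() is
 * 64-bit (LPAE), so the high physical address bits survive the mask;
 * PHYS_MASK then trims the result to the addressable physical range.
 */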
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

#ifndef CONFIG_HIGHPTE
#define __pte_map(pmd)		pmd_page_vaddr(*(pmd))
#define __pte_unmap(pte)	do { } while (0)
#else
#define __pte_map(pmd)		(pte_t *)kmap_atomic(pmd_page(*(pmd)))
#define __pte_unmap(pte)	kunmap_atomic(pte)
#endif

#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(pmd,addr)	(pmd_page_vaddr(*(pmd)) + pte_index(addr))

#define pte_offset_map(pmd,addr)	(__pte_map(pmd) + pte_index(addr))
#define pte_unmap(pte)			__pte_unmap(pte)

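/*
 * A minimal walk sketch (illustrative only, assuming the pmd is present
 * and maps a page table):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * pud_offset() and pmd_offset() come from the nopud and 2-level/3-level
 * headers included above.
 */
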
#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))

#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)

#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)

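/*
 * pte_isset() must cope with L_PTE_* constants that are wider than the
 * 32-bit int a conditional result may be truncated to: when (val) fits
 * in a u32 the masked value is returned directly, otherwise it is
 * collapsed to 0/1 with !! so that testing a high (LPAE) bit cannot be
 * lost in the truncation.
 */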
#define pte_isset(pte, val)	((u32)(val) == (val) ? pte_val(pte) & (val) \
						: !!(pte_val(pte) & (val)))
#define pte_isclear(pte, val)	(!(pte_val(pte) & (val)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_present(pte)	(pte_isset((pte), L_PTE_PRESENT))
#define pte_valid(pte)		(pte_isset((pte), L_PTE_VALID))
#define pte_accessible(mm, pte)	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
#define pte_write(pte)		(pte_isclear((pte), L_PTE_RDONLY))
#define pte_dirty(pte)		(pte_isset((pte), L_PTE_DIRTY))
#define pte_young(pte)		(pte_isset((pte), L_PTE_YOUNG))
#define pte_exec(pte)		(pte_isclear((pte), L_PTE_XN))

#define pte_valid_user(pte)	\
	(pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte))

#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif

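/*
 * User mappings are made non-global (PTE_EXT_NG) so they are tagged with
 * the current ASID in the TLB instead of being visible to every address
 * space; kernel mappings (addr >= TASK_SIZE) stay global.
 */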
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	unsigned long ext = 0;

	if (addr < TASK_SIZE && pte_valid_user(pteval)) {
		if (!pte_special(pteval))
			__sync_icache_dcache(pteval);
		ext |= PTE_EXT_NG;
	}

	set_pte_ext(ptep, pteval, ext);
}

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_DIRTY));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_DIRTY));
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_YOUNG));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_YOUNG));
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_XN));
}

static inline pte_t pte_mknexec(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_XN));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
		L_PTE_NONE | L_PTE_VALID;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

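/*
 * Only the protection bits in 'mask' are taken from newprot; the pfn and
 * the memory-type (L_PTE_MT_*) bits of the old pte are preserved.  That
 * is what lets a permission change leave cacheability alone, e.g.
 * (illustrative):
 *
 *	pte = pte_modify(pte, vm_get_page_prot(vma->vm_flags));
 */
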
/*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset ------------------------> < type -> 0 0
 *
 * This gives us up to 31 swap files and 128GB per swap file.  Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
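
/*
 * Worked example (illustrative values): __swp_entry(3, 0x1234) encodes to
 * (0x1234 << 7) | (3 << 2) = 0x91a0c; the two low zero bits keep the
 * entry distinct from any present pte, since L_PTE_PRESENT is never set.
 */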

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pgtable_cache_init() do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */