powerpc: Make STRICT_MM_TYPECHECKS a config option
arch/powerpc/include/asm/page.h
#ifndef _ASM_POWERPC_PAGE_H
#define _ASM_POWERPC_PAGE_H

/*
 * Copyright (C) 2001,2005 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef __ASSEMBLY__
#include <linux/types.h>
#else
#include <asm/types.h>
#endif
#include <asm/asm-compat.h>
#include <asm/kdump.h>

/*
 * On regular PPC32 the page size is 4K (but we support 4K/16K/64K/256K pages
 * on PPC44x). For PPC64 we support either 4K or 64K software
 * page size. When using 64K pages however, whether we are really supporting
 * 64K pages in HW or not is irrelevant to those definitions.
 */
#if defined(CONFIG_PPC_256K_PAGES)
#define PAGE_SHIFT		18
#elif defined(CONFIG_PPC_64K_PAGES)
#define PAGE_SHIFT		16
#elif defined(CONFIG_PPC_16K_PAGES)
#define PAGE_SHIFT		14
#else
#define PAGE_SHIFT		12
#endif

#define PAGE_SIZE		(ASM_CONST(1) << PAGE_SHIFT)

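/*
 * Worked example (editor's note): with CONFIG_PPC_64K_PAGES,
 * PAGE_SHIFT = 16 and PAGE_SIZE = 1 << 16 = 0x10000 (64K); with the
 * default 4K pages, PAGE_SHIFT = 12 and PAGE_SIZE = 0x1000.
 * ASM_CONST() makes the literal an unsigned long constant in C while
 * leaving it a plain number for assembly.
 */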
#ifndef __ASSEMBLY__
#ifdef CONFIG_HUGETLB_PAGE
extern unsigned int HPAGE_SHIFT;
#else
#define HPAGE_SHIFT PAGE_SHIFT
#endif
#define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE		(MMU_PAGE_COUNT-1)
#endif

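/*
 * Example (editor's note): for a 16M huge page, HPAGE_SHIFT = 24, so
 * with 4K base pages HUGETLB_PAGE_ORDER = 24 - 12 = 12, i.e. one huge
 * page covers 2^12 = 4096 base pages.
 */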
/*
 * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
 * assign PAGE_MASK to a larger type it gets extended the way we want
 * (i.e. with 1s in the high bits)
 */
#define PAGE_MASK	(~((1 << PAGE_SHIFT) - 1))

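/*
 * Worked example (editor's note): with PAGE_SHIFT = 12,
 * PAGE_MASK = ~0xfff = 0xfffff000 as a 32-bit int. Because it is a
 * signed int, assigning it to a 64-bit unsigned long sign-extends it
 * to 0xfffffffffffff000, so the mask still clears only the low
 * PAGE_SHIFT bits:
 *
 *	unsigned long addr = 0xc000000012345678UL;
 *	addr &= PAGE_MASK;	// 0xc000000012345000UL
 */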
/*
 * KERNELBASE is the virtual address of the start of the kernel; it's often
 * the same as PAGE_OFFSET, but _might not be_.
 *
 * The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
 *
 * PAGE_OFFSET is the virtual address of the start of lowmem.
 *
 * PHYSICAL_START is the physical address of the start of the kernel.
 *
 * MEMORY_START is the physical address of the start of lowmem.
 *
 * KERNELBASE, PAGE_OFFSET, and PHYSICAL_START are all configurable on
 * ppc32 and based on how they are set we determine MEMORY_START.
 *
 * For the linear mapping the following equation should be true:
 * KERNELBASE - PAGE_OFFSET = PHYSICAL_START - MEMORY_START
 *
 * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START
 *
 * There are two ways to translate between a virtual and a physical address:
 * va = pa + PAGE_OFFSET - MEMORY_START
 * va = pa + KERNELBASE - PHYSICAL_START
 *
 * If you want to know something's offset from the start of the kernel you
 * should subtract KERNELBASE.
 *
 * If you want to test if something's a kernel address, use is_kernel_addr().
 */
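/*
 * Worked example (editor's note, using common ppc32 defaults): with
 * KERNELBASE = PAGE_OFFSET = 0xc0000000 and PHYSICAL_START =
 * MEMORY_START = 0, both equations above give
 * va(0x100000) = 0x100000 + 0xc0000000 = 0xc0100000.
 */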

#define KERNELBASE	ASM_CONST(CONFIG_KERNEL_START)
#define PAGE_OFFSET	ASM_CONST(CONFIG_PAGE_OFFSET)
#define LOAD_OFFSET	ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))

#if defined(CONFIG_NONSTATIC_KERNEL)
#ifndef __ASSEMBLY__

extern phys_addr_t memstart_addr;
extern phys_addr_t kernstart_addr;

#ifdef CONFIG_RELOCATABLE_PPC32
extern long long virt_phys_offset;
#endif

#endif /* __ASSEMBLY__ */
#define PHYSICAL_START	kernstart_addr

#else	/* !CONFIG_NONSTATIC_KERNEL */
#define PHYSICAL_START	ASM_CONST(CONFIG_PHYSICAL_START)
#endif

/* See description below for VIRT_PHYS_OFFSET */
#ifdef CONFIG_RELOCATABLE_PPC32
#define VIRT_PHYS_OFFSET virt_phys_offset
#else
#define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
#endif

#ifdef CONFIG_PPC64
#define MEMORY_START	0UL
#elif defined(CONFIG_NONSTATIC_KERNEL)
#define MEMORY_START	memstart_addr
#else
#define MEMORY_START	(PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
#endif

#ifdef CONFIG_FLATMEM
#define ARCH_PFN_OFFSET		((unsigned long)(MEMORY_START >> PAGE_SHIFT))
#define pfn_valid(pfn)		((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr)
#endif

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

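/*
 * Example (editor's note): these round-trip, e.g. for any valid pfn,
 * __pa(pfn_to_kaddr(pfn)) >> PAGE_SHIFT == pfn, and
 * virt_to_page(pfn_to_kaddr(pfn)) == pfn_to_page(pfn).
 */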
/*
 * On Book-E parts we need __va to parse the device tree and we can't
 * determine MEMORY_START until then. However we can determine PHYSICAL_START
 * from information at hand (program counter, TLB lookup).
 *
 * On BookE with RELOCATABLE (RELOCATABLE_PPC32)
 *
 * With RELOCATABLE_PPC32, we support loading the kernel at any physical
 * address without any restriction on the page alignment.
 *
 * We find the runtime address of _stext and relocate ourselves based on
 * the following calculation:
 *
 *	virtual_base = ALIGN_DOWN(KERNELBASE, 256M) +
 *			MODULO(_stext.run, 256M)
 *
 * and create the following mapping:
 *
 *	ALIGN_DOWN(_stext.run, 256M) => ALIGN_DOWN(KERNELBASE, 256M)
 *
 * When we process relocations, we cannot depend on the
 * existing equation for the __va()/__pa() translations:
 *
 *	__va(x) = (x) - PHYSICAL_START + KERNELBASE
 *
 * where:
 *	PHYSICAL_START = kernstart_addr = physical address of _stext
 *	KERNELBASE = compiled virtual address of _stext.
 *
 * This formula holds true iff the kernel load address is TLB page aligned.
 *
 * In our case, we also need to account for the shift in the kernel virtual
 * address.
 *
 * E.g.,
 *
 * Let the kernel be loaded at 64MB and KERNELBASE be 0xc0000000 (same as
 * PAGE_OFFSET). In this case, we would be mapping 0 to 0xc0000000, and
 * kernstart_addr = 64M.
 *
 * Now __va(1MB) = (0x100000) - (0x4000000) + 0xc0000000
 *		 = 0xbc100000, which is wrong.
 *
 * Rather, it should be 0xc0000000 + 0x100000 = 0xc0100000
 * according to our mapping.
 *
 * Hence we use the following formula to get the translations right:
 *
 *	__va(x) = (x) - [ PHYSICAL_START - Effective KERNELBASE ]
 *
 * where:
 *	PHYSICAL_START = dynamic load address (the kernstart_addr variable)
 *	Effective KERNELBASE = virtual_base
 *			     = ALIGN_DOWN(KERNELBASE, 256M) +
 *				MODULO(PHYSICAL_START, 256M)
 *
 * To make the cost of __va()/__pa() more lightweight, we introduce
 * a new variable virt_phys_offset, which will hold:
 *
 *	virt_phys_offset = Effective KERNELBASE - PHYSICAL_START
 *			 = ALIGN_DOWN(KERNELBASE, 256M) -
 *			   ALIGN_DOWN(PHYSICAL_START, 256M)
 *
 * Hence:
 *
 *	__va(x) = x - PHYSICAL_START + Effective KERNELBASE
 *		= x + virt_phys_offset
 *
 * and
 *	__pa(x) = x + PHYSICAL_START - Effective KERNELBASE
 *		= x - virt_phys_offset
 *
 * On non-Book-E PPC64 PAGE_OFFSET and MEMORY_START are constants so use
 * the other definitions for __va & __pa.
 */
#ifdef CONFIG_BOOKE
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
#define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
#ifdef CONFIG_PPC64
/*
 * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
 * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
 */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET))
#define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL)

#else /* 32-bit, non book E */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
#endif
#endif
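/*
 * Worked example (editor's note): on PPC64 with
 * PAGE_OFFSET = 0xc000000000000000, OR-ing and AND-ing are equivalent
 * to adding and subtracting because linear-map physical addresses never
 * set the top nibble:
 *
 *	__va(0x0000000012345678) == (void *)0xc000000012345678
 *	__pa(0xc000000012345678) == 0x0000000012345678
 */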

/*
 * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
 * and needs to be executable. This means the whole heap ends
 * up being executable.
 */
#define VM_DATA_DEFAULT_FLAGS32	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_DATA_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#ifdef __powerpc64__
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif

/* align addr on a size boundary - adjust address up/down if needed */
#define _ALIGN_UP(addr,size)	(((addr)+((size)-1))&(~((size)-1)))
#define _ALIGN_DOWN(addr,size)	((addr)&(~((size)-1)))

/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr,size)	_ALIGN_UP(addr,size)

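/*
 * Worked example (editor's note): size must be a power of two. With
 * size = 0x1000:
 *
 *	_ALIGN_UP(0x1234, 0x1000)	== 0x2000
 *	_ALIGN_DOWN(0x1234, 0x1000)	== 0x1000
 */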
/*
 * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
 * "kernelness", use is_kernel_addr() - it should do what you want.
 */
#ifdef CONFIG_PPC_BOOK3E_64
#define is_kernel_addr(x)	((x) >= 0x8000000000000000ul)
#else
#define is_kernel_addr(x)	((x) >= PAGE_OFFSET)
#endif

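/*
 * Example (editor's note): on Book3S-64, where PAGE_OFFSET is
 * 0xc000000000000000, is_kernel_addr(0xc000000000001000) is true while
 * any user address below PAGE_OFFSET is not; Book3E-64 instead treats
 * the whole upper half of the address space as kernel.
 */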
#ifndef CONFIG_PPC_BOOK3S_64
/*
 * Use the top bit of the higher-level page table entries to indicate whether
 * the entries we point to contain hugepages. This works because we know that
 * the page tables live in kernel space. If we ever decide to support having
 * page tables at arbitrary addresses, this breaks and will have to change.
 */
#ifdef CONFIG_PPC64
#define PD_HUGE 0x8000000000000000
#else
#define PD_HUGE 0x80000000
#endif
#endif /* !CONFIG_PPC_BOOK3S_64 */

/*
 * Some number of bits at the level of the page table that points to
 * a hugepte are used to encode the size. This masks those bits.
 */
#define HUGEPD_SHIFT_MASK 0x3f

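/*
 * Editor's note: 0x3f masks the low six bits, enough to encode any page
 * shift up to 63, i.e. any page size representable in a 64-bit address
 * space.
 */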
#ifndef __ASSEMBLY__

#ifdef CONFIG_STRICT_MM_TYPECHECKS
/* These are used to make use of C type-checking. */

/* PTE level */
typedef struct { pte_basic_t pte; } pte_t;
#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) })

/* 64k pages additionally define a bigger "real PTE" type that gathers
 * the "second half" part of the PTE for pseudo 64k pages
 */
#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef struct { pte_t pte; } real_pte_t;
#endif

/* PMD level */
#ifdef CONFIG_PPC64
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

/* PUD level exists only on 4k pages */
#ifndef CONFIG_PPC_64K_PAGES
typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) })
#endif /* !CONFIG_PPC_64K_PAGES */
#endif /* CONFIG_PPC64 */

/* PGD level */
typedef struct { unsigned long pgd; } pgd_t;
#define pgd_val(x)	((x).pgd)
#define __pgd(x)	((pgd_t) { (x) })

/* Page protection bits */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) })

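/*
 * Example (editor's note): with CONFIG_STRICT_MM_TYPECHECKS=y the
 * one-member structs make accidental mixing of page table types a
 * compile-time error:
 *
 *	pte_t pte = __pte(0);
 *	unsigned long v = pte;		// build error: incompatible types
 *	unsigned long w = pte_val(pte);	// OK, explicit unwrap
 *
 * With the option off, pte_t is a bare pte_basic_t and both assignments
 * compile.
 */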
#else

/*
 * .. while these make it easier on the compiler
 */

typedef pte_basic_t pte_t;
#define pte_val(x)	(x)
#define __pte(x)	(x)

#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef pte_t real_pte_t;
#endif

#ifdef CONFIG_PPC64
typedef unsigned long pmd_t;
#define pmd_val(x)	(x)
#define __pmd(x)	(x)

#ifndef CONFIG_PPC_64K_PAGES
typedef unsigned long pud_t;
#define pud_val(x)	(x)
#define __pud(x)	(x)
#endif /* !CONFIG_PPC_64K_PAGES */
#endif /* CONFIG_PPC64 */

typedef unsigned long pgd_t;
#define pgd_val(x)	(x)
#define pgprot_val(x)	(x)

typedef unsigned long pgprot_t;
#define __pgd(x)	(x)
#define __pgprot(x)	(x)

#endif

typedef struct { signed long pd; } hugepd_t;

#ifdef CONFIG_HUGETLB_PAGE
#ifdef CONFIG_PPC_BOOK3S_64
static inline int hugepd_ok(hugepd_t hpd)
{
	/*
	 * hugepd pointer, bottom two bits == 00 and next 4 bits
	 * indicate size of table
	 */
	return (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
}
#else
static inline int hugepd_ok(hugepd_t hpd)
{
	return (hpd.pd > 0);
}
#endif

#define is_hugepd(hpd)		(hugepd_ok(hpd))
#define pgd_huge pgd_huge
int pgd_huge(pgd_t pgd);
#else /* CONFIG_HUGETLB_PAGE */
#define is_hugepd(pdep)		0
#define pgd_huge(pgd)		0
#endif /* CONFIG_HUGETLB_PAGE */
#define __hugepd(x) ((hugepd_t) { (x) })

struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
			   struct page *p);
extern int page_is_ram(unsigned long pfn);
extern int devmem_is_allowed(unsigned long pfn);

#ifdef CONFIG_PPC_SMLPAR
void arch_free_page(struct page *page, int order);
#define HAVE_ARCH_FREE_PAGE
#endif

struct vm_area_struct;

#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC64)
typedef pte_t *pgtable_t;
#else
typedef struct page *pgtable_t;
#endif

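/*
 * Editor's note on the pgtable_t choice above: with 64K pages on PPC64 a
 * PTE table is smaller than a full page and several share one page, so a
 * struct page pointer cannot name an individual table; a pte_t pointer
 * can. Elsewhere a page table occupies whole pages and struct page works.
 */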
#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PAGE_H */