include/asm-ppc64/page.h
#ifndef _PPC64_PAGE_H
#define _PPC64_PAGE_H

/*
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>

#ifdef __ASSEMBLY__
#define ASM_CONST(x) x
#else
#define __ASM_CONST(x) x##UL
#define ASM_CONST(x) __ASM_CONST(x)
#endif
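/* ASM_CONST() lets the same constant be shared between C and assembler:
 * in C it gets a UL suffix, e.g. ASM_CONST(0xC000000000000000) expands to
 * 0xC000000000000000UL, while the assembler sees the bare value. */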

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
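/* With PAGE_SHIFT == 12 the base page size is 4K: PAGE_SIZE == 0x1000UL
 * and PAGE_MASK == 0xfffffffffffff000. */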

#define SID_SHIFT 28
#define SID_MASK 0xfffffffffUL
#define ESID_MASK 0xfffffffff0000000UL
#define GET_ESID(x) (((x) >> SID_SHIFT) & SID_MASK)
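/* The address space is carved into 256MB segments; GET_ESID() extracts the
 * effective segment ID, e.g. GET_ESID(0x10000000) == 1 and
 * GET_ESID(0x2fffffff) == 2. */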

#define HPAGE_SHIFT 24
#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
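/* HPAGE_SHIFT == 24 gives 16MB huge pages (HPAGE_SIZE == 0x1000000UL),
 * the size advertised by the CPU_FTR_16M_PAGE feature tested below. */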

#ifdef CONFIG_HUGETLB_PAGE

#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)

/* For 64-bit processes the hugepage range is 1T-1.5T */
#define TASK_HPAGE_BASE ASM_CONST(0x0000010000000000)
#define TASK_HPAGE_END ASM_CONST(0x0000018000000000)
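/* TASK_HPAGE_BASE is 1TB and TASK_HPAGE_END is 1.5TB, so 64-bit processes
 * get a dedicated 512GB window for hugepage mappings. */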

#define LOW_ESID_MASK(addr, len) (((1U << (GET_ESID(addr+len-1)+1)) \
	- (1U << GET_ESID(addr))) & 0xffff)
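/* LOW_ESID_MASK() builds a 16-bit mask with one bit per 256MB segment
 * below 4GB touched by [addr, addr+len).  For example, addr == 0x10000000
 * with len == 0x20000000 covers segments 1 and 2, giving a mask of 0x6. */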

#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
#define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
#define ARCH_HAS_SETCLEAR_HUGE_PTE

#define touches_hugepage_low_range(mm, addr, len) \
	(LOW_ESID_MASK((addr), (len)) & mm->context.htlb_segs)
#define touches_hugepage_high_range(addr, len) \
	(((addr) > (TASK_HPAGE_BASE-(len))) && ((addr) < TASK_HPAGE_END))
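/* mm->context.htlb_segs is a bitmap of the 256MB segments below 4GB that
 * this address space has converted to huge pages; the low-range tests
 * simply intersect it with LOW_ESID_MASK() of the range in question. */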

#define __within_hugepage_low_range(addr, len, segmask) \
	((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask))
#define within_hugepage_low_range(addr, len) \
	__within_hugepage_low_range((addr), (len), \
		current->mm->context.htlb_segs)
#define within_hugepage_high_range(addr, len) (((addr) >= TASK_HPAGE_BASE) \
	&& ((addr)+(len) <= TASK_HPAGE_END) && ((addr)+(len) >= (addr)))
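/* The ((addr)+(len) >= (addr)) term guards against wrap-around of the
 * 64-bit sum, so an overly large len cannot make a range appear to fit. */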

#define is_hugepage_only_range(mm, addr, len) \
	(touches_hugepage_high_range((addr), (len)) || \
	 touches_hugepage_low_range((mm), (addr), (len)))
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA

#define in_hugepage_area(context, addr) \
	(cpu_has_feature(CPU_FTR_16M_PAGE) && \
	 ( (((addr) >= TASK_HPAGE_BASE) && ((addr) < TASK_HPAGE_END)) || \
	   ( ((addr) < 0x100000000L) && \
	     ((1 << GET_ESID(addr)) & (context).htlb_segs) ) ) )
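/* in_hugepage_area() additionally requires the CPU to support 16MB pages
 * (CPU_FTR_16M_PAGE); without that feature no address is treated as lying
 * in a hugepage area. */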

#else /* !CONFIG_HUGETLB_PAGE */

#define in_hugepage_area(mm, addr) 0

#endif /* !CONFIG_HUGETLB_PAGE */

/* align addr on a size boundary - adjust address up/down if needed */
#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))

/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr,size) _ALIGN_UP(addr,size)

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE)
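/* size must be a power of two.  For example, with 4K pages
 * PAGE_ALIGN(0x1234) == 0x2000, _ALIGN_DOWN(0x1234, 0x1000) == 0x1000,
 * and an already aligned address is returned unchanged. */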

#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <asm/cache.h>

#undef STRICT_MM_TYPECHECKS

#define REGION_SIZE 4UL
#define REGION_SHIFT 60UL
#define REGION_MASK (((1UL<<REGION_SIZE)-1UL)<<REGION_SHIFT)
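/* The region is the top nibble of an effective address:
 * REGION_MASK == 0xf000000000000000UL.  Kernel, vmalloc and user addresses
 * live in different regions (see REGION_ID() below). */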

static __inline__ void clear_page(void *addr)
{
	unsigned long lines, line_size;

	line_size = ppc64_caches.dline_size;
	lines = ppc64_caches.dlines_per_page;

	__asm__ __volatile__(
	"mtctr	%1	# clear_page\n\
1:	dcbz	0,%0\n\
	add	%0,%0,%3\n\
	bdnz+	1b"
	: "=r" (addr)
	: "r" (lines), "0" (addr), "r" (line_size)
	: "ctr", "memory");
}
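/* dcbz zeroes a whole data cache line without first fetching it from
 * memory, so the loop above clears the page in dlines_per_page iterations,
 * with the loop count held in the CTR register (mtctr/bdnz). */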

extern void copy_page(void *to, void *from);
struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *p);

#ifdef STRICT_MM_TYPECHECKS
/*
 * These are used to make use of C type-checking.
 * All of these entries (pte, pmd, pud and pgd) are 64 bits wide on ppc64.
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x) ((x).pte)
#define pmd_val(x) ((x).pmd)
#define pud_val(x) ((x).pud)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)

#define __pte(x) ((pte_t) { (x) })
#define __pmd(x) ((pmd_t) { (x) })
#define __pud(x) ((pud_t) { (x) })
#define __pgd(x) ((pgd_t) { (x) })
#define __pgprot(x) ((pgprot_t) { (x) })

#else
/*
 * .. while these make it easier on the compiler
 */
typedef unsigned long pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pud_t;
typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;

#define pte_val(x) (x)
#define pmd_val(x) (x)
#define pud_val(x) (x)
#define pgd_val(x) (x)
#define pgprot_val(x) (x)

#define __pte(x) (x)
#define __pmd(x) (x)
#define __pud(x) (x)
#define __pgd(x) (x)
#define __pgprot(x) (x)

#endif
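/* STRICT_MM_TYPECHECKS is #undef'd above, so the plain unsigned long
 * versions are what normally gets built; defining it instead makes pte_t
 * and friends distinct struct types, so mixing them up with plain
 * integers becomes a compile-time error. */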

/* Pure 2^n version of get_order */
static inline int get_order(unsigned long size)
{
	int order;

	size = (size-1) >> (PAGE_SHIFT-1);
	order = -1;
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}
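/* get_order() returns the allocation order for a size in bytes, e.g. with
 * 4K pages get_order(4096) == 0, get_order(4097) == 1 and
 * get_order(64*1024) == 4. */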

#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)

extern int page_is_ram(unsigned long pfn);

extern u64 ppc64_pft_size;	/* Log 2 of page table size */

/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
#define __HAVE_ARCH_GATE_AREA 1

#endif /* __ASSEMBLY__ */

#ifdef MODULE
#define __page_aligned __attribute__((__aligned__(PAGE_SIZE)))
#else
#define __page_aligned \
	__attribute__((__aligned__(PAGE_SIZE), \
		__section__(".data.page_aligned")))
#endif
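/* In the core kernel, page-aligned data is also placed in the
 * .data.page_aligned section so the linker can group such objects
 * together; modules only get the alignment attribute since they do not
 * have that section. */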


/* This must match the -Ttext linker address */
/* Note: tophys & tovirt make assumptions about how */
/* KERNELBASE is defined for performance reasons. */
/* When KERNELBASE moves, those macros may have */
/* to change! */
#define PAGE_OFFSET ASM_CONST(0xC000000000000000)
#define KERNELBASE PAGE_OFFSET
#define VMALLOCBASE ASM_CONST(0xD000000000000000)

#define VMALLOC_REGION_ID (VMALLOCBASE >> REGION_SHIFT)
#define KERNEL_REGION_ID (KERNELBASE >> REGION_SHIFT)
#define USER_REGION_ID (0UL)
#define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT)
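/* REGION_ID() is the top nibble of an effective address, so
 * REGION_ID(0xC000000000000000UL) == KERNEL_REGION_ID (0xC) and
 * REGION_ID(0xD000000000000000UL) == VMALLOC_REGION_ID (0xD). */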

#define __va(x) ((void *)((unsigned long)(x) + KERNELBASE))
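/* __va()/__pa() translate between the kernel linear mapping and physical
 * addresses by adding/subtracting KERNELBASE (== PAGE_OFFSET), e.g.
 * __va(0x1000) == (void *)0xC000000000001000UL. */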

#ifdef CONFIG_DISCONTIGMEM
#define page_to_pfn(page) discontigmem_page_to_pfn(page)
#define pfn_to_page(pfn) discontigmem_pfn_to_page(pfn)
#define pfn_valid(pfn) discontigmem_pfn_valid(pfn)
#endif
#ifdef CONFIG_FLATMEM
#define pfn_to_page(pfn) (mem_map + (pfn))
#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#endif
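/* With FLATMEM the pfn <-> struct page conversion is plain pointer
 * arithmetic on the single mem_map array; with DISCONTIGMEM each node has
 * its own map, so the conversion goes through the discontigmem_* helpers. */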

#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)

#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

/*
 * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
 * and needs to be executable.  This means the whole heap ends
 * up being executable.
 */
#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_DATA_DEFAULT_FLAGS \
	(test_thread_flag(TIF_32BIT) ? \
	 VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
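/* 32-bit tasks therefore get VM_EXEC on their data mappings (the PLT in
 * BSS must stay executable), while 64-bit tasks do not; the choice is
 * made per task via TIF_32BIT. */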

/*
 * This is the default if a program doesn't have a PT_GNU_STACK
 * program header entry.  The PPC64 ELF ABI has a non-executable stack
 * by default, so in the absence of a PT_GNU_STACK program header
 * we turn execute permission off.
 */
#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
				  VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
				  VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_STACK_DEFAULT_FLAGS \
	(test_thread_flag(TIF_32BIT) ? \
	 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)

#endif /* __KERNEL__ */
#endif /* _PPC64_PAGE_H */