#ifndef _ASM_POWERPC_PGTABLE_PPC32_H
#define _ASM_POWERPC_PGTABLE_PPC32_H

#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/io.h>			/* For sub-arch specific PPC_PIN_SIZE */

extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
extern unsigned long ioremap_bot, ioremap_base;

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_SHIFT)
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << (32 - PGDIR_SHIFT))
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_SHIFT)
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0
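
/*
 * Illustrative sketch (not part of the original header): with the usual
 * 32-bit PTE configuration of PAGE_SHIFT = 12 and PTE_SHIFT = 10, the
 * geometry above works out as follows.  These values are assumptions for
 * this example only; other sub-arches differ.
 *
 *	PGDIR_SHIFT  = 12 + 10       = 22
 *	PGDIR_SIZE   = 1UL << 22     = 4MB mapped per pgdir entry
 *	PTRS_PER_PTE = 1 << 10       = 1024 PTEs per PTE page
 *	PTRS_PER_PGD = 1 << (32-22)  = 1024 pgdir entries
 *
 * i.e. 1024 entries of 4MB each cover the whole 4GB address space,
 * matching the "1-page 1024-entry pgdir" case described above.
 */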

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 16MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from ioremap_base being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings set up in early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM. -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END	ioremap_bot
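
/*
 * Illustrative sketch (not part of the original header), assuming the
 * non-PPC_PIN_SIZE case and a hypothetical high_memory of 0xd0000000
 * (256MB of lowmem mapped at 0xc0000000):
 *
 *	VMALLOC_START = (0xd0000000 + 0x1000000) & ~(0x1000000 - 1)
 *	              = 0xd1000000 & 0xff000000
 *	              = 0xd1000000
 *
 * i.e. high_memory is bumped by VMALLOC_OFFSET and rounded down to a
 * 16MB boundary: a 16MB-aligned high_memory (the common case) gets
 * exactly a 16MB hole; otherwise the hole is VMALLOC_OFFSET minus the
 * misalignment.
 */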

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)
#include <asm/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/pte-fsl-booke.h>
#elif defined(CONFIG_8xx)
#include <asm/pte-8xx.h>
#else /* CONFIG_6xx */
#include <asm/pte-hash32.h>
#endif

/* If _PAGE_SPECIAL is defined, then we advertise our support for it */
#ifdef _PAGE_SPECIAL
#define __HAVE_ARCH_PTE_SPECIAL
#endif

/*
 * Some bits are only used on some cpu families...  Make sure that all
 * the undefined ones get defined as 0.
 */
#ifndef _PAGE_HASHPTE
#define _PAGE_HASHPTE	0
#endif
#ifndef _PTE_NONE_MASK
#define _PTE_NONE_MASK	0
#endif
#ifndef _PAGE_SHARED
#define _PAGE_SHARED	0
#endif
#ifndef _PAGE_HWWRITE
#define _PAGE_HWWRITE	0
#endif
#ifndef _PAGE_HWEXEC
#define _PAGE_HWEXEC	0
#endif
#ifndef _PAGE_EXEC
#define _PAGE_EXEC	0
#endif
#ifndef _PAGE_ENDIAN
#define _PAGE_ENDIAN	0
#endif
#ifndef _PAGE_COHERENT
#define _PAGE_COHERENT	0
#endif
#ifndef _PAGE_WRITETHRU
#define _PAGE_WRITETHRU	0
#endif
#ifndef _PAGE_SPECIAL
#define _PAGE_SPECIAL	0
#endif
#ifndef _PMD_PRESENT_MASK
#define _PMD_PRESENT_MASK	_PMD_PRESENT
#endif
#ifndef _PMD_SIZE
#define _PMD_SIZE	0
#define PMD_PAGE_SIZE(pmd)	bad_call_to_PMD_PAGE_SIZE()
#endif
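
/*
 * Illustrative note (not part of the original header): defining the
 * missing bits as 0 keeps every downstream bitmask expression valid on
 * every family.  For example, on a sub-arch without _PAGE_COHERENT,
 *
 *	#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
 *
 * still compiles and simply ORs in 0, and tests such as
 * (pte_val(pte) & _PAGE_COHERENT) constant-fold to 0.
 */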

#define _PAGE_HPTEFLAGS _PAGE_HASHPTE

#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
			 _PAGE_SPECIAL)

#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU | _PAGE_ENDIAN | \
			 _PAGE_USER | _PAGE_ACCESSED | \
			 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
			 _PAGE_EXEC | _PAGE_HWEXEC)

/*
 * We define 2 sets of base prot bits, one for basic pages (i.e.,
 * cacheable kernel and user pages) and one for non-cacheable
 * pages.  We always set _PAGE_COHERENT when SMP is enabled or
 * the processor might need it for DMA coherency.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU)
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
#else
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)
#endif
#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE)

#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
#define _PAGE_KERNEL	(_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE)
#define _PAGE_KERNEL_NC	(_PAGE_BASE_NC | _PAGE_SHARED | _PAGE_WRENABLE)

#ifdef CONFIG_PPC_STD_MMU
/* On standard PPC MMU, no user access implies kernel read/write access,
 * so to write-protect kernel memory we must turn on user access */
#define _PAGE_KERNEL_RO	(_PAGE_BASE | _PAGE_SHARED | _PAGE_USER)
#else
#define _PAGE_KERNEL_RO	(_PAGE_BASE | _PAGE_SHARED)
#endif

#define _PAGE_IO	(_PAGE_KERNEL_NC | _PAGE_GUARDED)
#define _PAGE_RAM	(_PAGE_KERNEL | _PAGE_HWEXEC)

#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
	defined(CONFIG_KPROBES)
/* We want the debuggers to be able to set breakpoints anywhere, so
 * don't write protect the kernel text */
#define _PAGE_RAM_TEXT	_PAGE_RAM
#else
#define _PAGE_RAM_TEXT	(_PAGE_KERNEL_RO | _PAGE_HWEXEC)
#endif

#define PAGE_NONE	__pgprot(_PAGE_BASE)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

#define PAGE_KERNEL		__pgprot(_PAGE_RAM)
#define PAGE_KERNEL_NOCACHE	__pgprot(_PAGE_IO)

/*
 * The PowerPC can only do execute protection on a segment (256MB) basis,
 * not on a page basis.  So we consider execute permission the same as read.
 * Also, write permissions imply read permissions.
 * This is the closest we can get.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY_X
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY_X
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY_X
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED_X
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED_X
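
/*
 * Illustrative note (not part of the original header): __Pxwr/__Sxwr are
 * indexed by the VM_EXEC/VM_WRITE/VM_READ bits of a vma, for private vs.
 * shared mappings.  Because execute permission degrades to read here, a
 * private PROT_READ mapping (__P001) gets PAGE_READONLY_X; and because a
 * private writable mapping must be copy-on-write, __P011 maps to
 * PAGE_COPY_X (no _PAGE_RW) while the shared equivalent __S011 maps to
 * PAGE_SHARED_X (with _PAGE_RW).
 */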

#ifndef __ASSEMBLY__
/* Make sure we get a link error if PMD_PAGE_SIZE is ever called on a
 * kernel without large page PMD support */
extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);

/*
 * Conversions between PTE values and page frame numbers.
 */

/* in some cases we want to additionally adjust where the pfn is in the pte to
 * allow room for more flags */
#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#define PFN_SHIFT_OFFSET	(PAGE_SHIFT + 8)
#else
#define PFN_SHIFT_OFFSET	(PAGE_SHIFT)
#endif

#define pte_pfn(x)		(pte_val(x) >> PFN_SHIFT_OFFSET)
#define pte_page(x)		pfn_to_page(pte_pfn(x))

#define pfn_pte(pfn, prot)	__pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) |\
					pgprot_val(prot))
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
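
/*
 * Illustrative sketch (not part of the original header): pfn_pte() and
 * pte_pfn() are inverses.  Assuming PAGE_SHIFT = 12 and the non-FSL case
 * (PFN_SHIFT_OFFSET == PAGE_SHIFT), for a hypothetical pfn of 0x1234:
 *
 *	pte_t pte = pfn_pte(0x1234, PAGE_KERNEL);
 *	-> pte_val(pte) == (0x1234 << 12) | pgprot_val(PAGE_KERNEL)
 *	-> pte_pfn(pte) == 0x1234	(the low flag bits shift out)
 *
 * On FSL_BOOKE with 64-bit PTEs the pfn instead sits 8 bits higher so
 * the extra flag bits fit below it.
 */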

#endif /* __ASSEMBLY__ */

#define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
#define pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)

#ifndef __ASSEMBLY__
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }

static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) {
	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
static inline pgprot_t pte_pgprot(pte_t pte)
{
	return __pgprot(pte_val(pte) & PAGE_PROT_BITS);
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
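
/*
 * Illustrative sketch (not part of the original header): mprotect()-style
 * permission changes go through pte_modify(), which keeps only the bits
 * in _PAGE_CHG_MASK (pfn, accessed, dirty, special) and replaces the
 * rest from the new pgprot:
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 *	-> pfn and accessed/dirty state survive, _PAGE_RW is gone
 *
 * The pte_mk*() and pte_wrprotect() helpers above are the single-bit
 * variants used by the generic mm code, e.g. pte_mkdirty(pte_mkyoung(pte)).
 */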

/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

/*
 * PTE updates.  This function is called whenever an existing
 * valid PTE is updated.  This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide.  In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long old = pte_val(*p);
	*p = __pte((old & ~clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long long old = pte_val(*p);
	*p = __pte((old & ~(unsigned long long)clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif /* CONFIG_PTE_64BIT */
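
/*
 * Illustrative sketch (not part of the original header): pte_update() is
 * the one primitive the helpers below build on -- an atomic
 * read-modify-write that returns the old PTE value:
 *
 *	old = pte_update(ptep, _PAGE_ACCESSED, 0);   // clear a bit, test old
 *	pte_update(ptep, 0, _PAGE_DIRTY);            // set a bit
 *	pte_update(ptep, ~_PAGE_HASHPTE, 0);         // clear all but HASHPTE
 *
 * In the 64-bit-PTE atomic variant only the low word (which holds all
 * the flag bits) is covered by the lwarx/stwcx. reservation; the high
 * word is merely reloaded inside the loop.
 */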

/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, so we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
#if _PAGE_HASHPTE != 0
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(context, addr, ptephys, 1);
	}
#endif
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}


static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW |
		 _PAGE_HWEXEC | _PAGE_EXEC);
	pte_update(ptep, 0, bits);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	(mem_map + (pmd_val(pmd) >> PAGE_SHIFT))
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)		\
	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
#define pte_offset_map_nested(dir, addr)	\
	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))

#define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte)	kunmap_atomic(pte, KM_PTE1)
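
/*
 * Illustrative sketch (not part of the original header): putting the
 * walk macros together for a kernel address.  lookup_pte_kernel() is a
 * hypothetical helper, not an existing kernel function:
 *
 *	static pte_t *lookup_pte_kernel(unsigned long addr)
 *	{
 *		pgd_t *pgd = pgd_offset_k(addr);
 *		pmd_t *pmd = pmd_offset(pud_offset(pgd, addr), addr);
 *
 *		if (pmd_none(*pmd) || pmd_bad(*pmd))
 *			return NULL;
 *		return pte_offset_kernel(pmd, addr);
 *	}
 *
 * pud_offset()/pmd_offset() are the no-op level-folding helpers that
 * asm-generic/pgtable-nopmd.h (included above) provides for this
 * two-level layout.
 */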
483 | ||
f88df14b DG |
484 | /* |
485 | * Encode and decode a swap entry. | |
486 | * Note that the bits we use in a PTE for representing a swap entry | |
487 | * must not include the _PAGE_PRESENT bit, the _PAGE_FILE bit, or the | |
488 | *_PAGE_HASHPTE bit (if used). -- paulus | |
489 | */ | |
490 | #define __swp_type(entry) ((entry).val & 0x1f) | |
491 | #define __swp_offset(entry) ((entry).val >> 5) | |
492 | #define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 5) }) | |
493 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 }) | |
494 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 }) | |
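
/*
 * Illustrative sketch (not part of the original header): a swap entry
 * with, say, type 2 and offset 0x100 round-trips as
 *
 *	swp_entry_t e = __swp_entry(2, 0x100);	// e.val == 0x2002
 *	pte_t pte = __swp_entry_to_pte(e);	// pte_val == 0x10010
 *	__swp_type(__pte_to_swp_entry(pte))   == 2
 *	__swp_offset(__pte_to_swp_entry(pte)) == 0x100
 *
 * The << 3 reserves the low three PTE bits for the _PAGE_PRESENT,
 * _PAGE_FILE and _PAGE_HASHPTE bits named in the note above, so a
 * swapped-out PTE can never look present.
 */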

/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte) >> 3)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 3) | _PAGE_FILE })

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
		      pmd_t **pmdp);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_PPC32_H */