#ifndef _ASM_POWERPC_PGTABLE_PPC32_H
#define _ASM_POWERPC_PGTABLE_PPC32_H

#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/io.h>			/* For sub-arch specific PPC_PIN_SIZE */

extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
extern unsigned long ioremap_bot, ioremap_base;

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 11 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_SHIFT)
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << (32 - PGDIR_SHIFT))
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_SHIFT)
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))

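/*
 * A worked example of the normal 32-bit case above (a sketch, assuming
 * the usual 4kB pages, i.e. PAGE_SHIFT = 12 and PTE_SHIFT = 10):
 *
 *	PGDIR_SHIFT  = 12 + 10 = 22, so one pgd entry maps 4MB
 *	PTRS_PER_PGD = 1 << (32 - 22) = 1024 (one 4kB page of pgd entries)
 *	PTRS_PER_PTE = 1 << 10 = 1024 (one 4kB PTE page)
 *
 * A 32-bit virtual address then decomposes as:
 *
 *	pgd index = va >> 22		(top 10 bits)
 *	pte index = (va >> 12) & 0x3ff	(next 10 bits)
 *	offset	  = va & 0xfff		(low 12 bits)
 */
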
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 16MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from ioremap_base being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * the VM area allocations actually run into the mappings we set up during
 * early boot.  This really does become a problem for machines with large
 * amounts of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END	ioremap_bot

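/*
 * A worked example of the VMALLOC_START arithmetic (illustrative only,
 * assuming no PPC_PIN_SIZE and 192MB of lowmem mapped at the usual
 * 0xc0000000 kernel base, so high_memory == 0xcc000000):
 *
 *	VMALLOC_START = (0xcc000000 + 0x1000000) & ~(0x1000000 - 1)
 *		      = 0xcd000000
 *
 * i.e. the first 16MB-aligned address above high_memory, which in this
 * case leaves a full 16MB guard hole below the vmalloc area.
 */
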
/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)
#include <asm/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/pte-fsl-booke.h>
#elif defined(CONFIG_8xx)
#include <asm/pte-8xx.h>
#else /* CONFIG_6xx */
#include <asm/pte-hash32.h>
#endif

/* If _PAGE_SPECIAL is defined, then we advertise our support for it */
#ifdef _PAGE_SPECIAL
#define __HAVE_ARCH_PTE_SPECIAL
#endif

/*
 * Some bits are only used on some cpu families...  Make sure that all
 * the undefined ones get defined as 0.
 */
#ifndef _PAGE_HASHPTE
#define _PAGE_HASHPTE	0
#endif
#ifndef _PTE_NONE_MASK
#define _PTE_NONE_MASK	0
#endif
#ifndef _PAGE_SHARED
#define _PAGE_SHARED	0
#endif
#ifndef _PAGE_HWWRITE
#define _PAGE_HWWRITE	0
#endif
#ifndef _PAGE_HWEXEC
#define _PAGE_HWEXEC	0
#endif
#ifndef _PAGE_EXEC
#define _PAGE_EXEC	0
#endif
#ifndef _PAGE_ENDIAN
#define _PAGE_ENDIAN	0
#endif
#ifndef _PAGE_COHERENT
#define _PAGE_COHERENT	0
#endif
#ifndef _PAGE_WRITETHRU
#define _PAGE_WRITETHRU	0
#endif
#ifndef _PAGE_SPECIAL
#define _PAGE_SPECIAL	0
#endif
#ifndef _PMD_PRESENT_MASK
#define _PMD_PRESENT_MASK	_PMD_PRESENT
#endif
#ifndef _PMD_SIZE
#define _PMD_SIZE	0
#define PMD_PAGE_SIZE(pmd)	bad_call_to_PMD_PAGE_SIZE()
#endif

#define _PAGE_HPTEFLAGS _PAGE_HASHPTE

/* Location of the PFN in the PTE.  Most platforms use the same value as
 * _PAGE_SHIFT here (i.e., the PFN is naturally aligned).  Platforms that
 * don't simply pre-define the value, so we don't override it here.
 */
#ifndef PTE_RPN_SHIFT
#define PTE_RPN_SHIFT	(PAGE_SHIFT)
#endif

#ifdef CONFIG_PTE_64BIT
#define PTE_RPN_MAX	(1ULL << (64 - PTE_RPN_SHIFT))
#define PTE_RPN_MASK	(~((1ULL<<PTE_RPN_SHIFT)-1))
#else
#define PTE_RPN_MAX	(1UL << (32 - PTE_RPN_SHIFT))
#define PTE_RPN_MASK	(~((1UL<<PTE_RPN_SHIFT)-1))
#endif

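/*
 * For instance, with 32-bit PTEs and PTE_RPN_SHIFT == PAGE_SHIFT == 12,
 * the definitions above work out to:
 *
 *	PTE_RPN_MAX  = 1UL << 20	(2^20 page frames, i.e. 4GB of RAM)
 *	PTE_RPN_MASK = 0xfffff000	(PFN lives in the top 20 bits)
 *
 * The CONFIG_PTE_64BIT variant widens the PTE so that frame numbers for
 * >32-bit physical addresses still fit above the flag bits.
 */
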
/* _PAGE_CHG_MASK masks the bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
			 _PAGE_ACCESSED | _PAGE_SPECIAL)

/* Mask of bits returned by pte_pgprot() */
#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU | _PAGE_ENDIAN | \
			 _PAGE_USER | _PAGE_ACCESSED | \
			 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
			 _PAGE_EXEC | _PAGE_HWEXEC)

/*
 * We define 2 sets of base prot bits, one for basic pages (ie,
 * cacheable kernel and user pages) and one for non cacheable
 * pages.  We always set _PAGE_COHERENT when SMP is enabled or when
 * the processor might need it for DMA coherency.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU)
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
#else
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)
#endif
#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE)

#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
#define _PAGE_KERNEL	(_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE)
#define _PAGE_KERNEL_NC	(_PAGE_BASE_NC | _PAGE_SHARED | _PAGE_WRENABLE)

#ifdef CONFIG_PPC_STD_MMU
/* On standard PPC MMU, no user access implies kernel read/write access,
 * so to write-protect kernel memory we must turn on user access */
#define _PAGE_KERNEL_RO	(_PAGE_BASE | _PAGE_SHARED | _PAGE_USER)
#else
#define _PAGE_KERNEL_RO	(_PAGE_BASE | _PAGE_SHARED)
#endif

#define _PAGE_IO	(_PAGE_KERNEL_NC | _PAGE_GUARDED)
#define _PAGE_RAM	(_PAGE_KERNEL | _PAGE_HWEXEC)

#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
	defined(CONFIG_KPROBES)
/* We want the debuggers to be able to set breakpoints anywhere, so
 * don't write protect the kernel text */
#define _PAGE_RAM_TEXT	_PAGE_RAM
#else
#define _PAGE_RAM_TEXT	(_PAGE_KERNEL_RO | _PAGE_HWEXEC)
#endif

#define PAGE_NONE	__pgprot(_PAGE_BASE)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

#define PAGE_KERNEL		__pgprot(_PAGE_RAM)
#define PAGE_KERNEL_NOCACHE	__pgprot(_PAGE_IO)

/*
 * The PowerPC can only do execute protection on a segment (256MB) basis,
 * not on a page basis.  So we consider execute permission the same as read.
 * Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY_X
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY_X
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY_X
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED_X
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED_X

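/*
 * How the tables above get used -- a sketch of the generic protection_map
 * lookup, not code from this file: a private PROT_READ|PROT_WRITE mapping
 * has the read and write bits set but not exec, so the core VM picks
 * __P011, i.e. PAGE_COPY_X.  The page therefore starts out user-readable
 * but not writable; _PAGE_RW is only added at copy-on-write time.
 */
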
#ifndef __ASSEMBLY__
/* Make sure we get a link error if PMD_PAGE_SIZE is ever called on a
 * kernel without large page PMD support */
extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);

/*
 * Conversions between PTE values and page frame numbers.
 */

#define pte_pfn(x)		(pte_val(x) >> PTE_RPN_SHIFT)
#define pte_page(x)		pfn_to_page(pte_pfn(x))

#define pfn_pte(pfn, prot)	__pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |\
					pgprot_val(prot))
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
#endif /* __ASSEMBLY__ */

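/*
 * Round-trip sketch: for any page frame number pfn and protection prot,
 *
 *	pte_pfn(pfn_pte(pfn, prot)) == pfn
 *
 * since pfn_pte() shifts the pfn up by PTE_RPN_SHIFT and ors in the
 * protection bits, which all live below PTE_RPN_SHIFT, while pte_pfn()
 * simply shifts them back out.
 */
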
#define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
#define pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)

#ifndef __ASSEMBLY__
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }

static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) {
	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
static inline pgprot_t pte_pgprot(pte_t pte)
{
	return __pgprot(pte_val(pte) & PAGE_PROT_BITS);
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}

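/*
 * Usage sketch (illustrative, not code from this file): protection
 * changes go through pte_modify(), which keeps only _PAGE_CHG_MASK --
 * the PFN, the hash flags and DIRTY/ACCESSED/SPECIAL -- and takes all
 * other bits from the new pgprot:
 *
 *	pte = pte_modify(pte, PAGE_READONLY);	(drops _PAGE_RW and friends)
 */
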
/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

/*
 * PTE updates.  This function is called whenever an existing
 * valid PTE is updated.  This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide.  In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long old = pte_val(*p);
	*p = __pte((old & ~clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long long old = pte_val(*p);
	*p = __pte((old & ~(unsigned long long)clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif /* CONFIG_PTE_64BIT */

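/*
 * Typical pte_update() usage, as in the helpers below: pass the bits to
 * clear and the bits to set, and use the returned old value to decide
 * whether any further flushing is needed.  For example,
 *
 *	old = pte_update(ptep, _PAGE_ACCESSED, 0);
 *
 * atomically clears the accessed bit (when PTE_ATOMIC_UPDATES is
 * defined) and tells the caller whether it was previously set.
 */
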
/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, so we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
#if _PAGE_HASHPTE != 0
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(context, addr, ptephys, 1);
	}
#endif
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}

static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW |
		 _PAGE_HWEXEC | _PAGE_EXEC);
	pte_update(ptep, 0, bits);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	(mem_map + (pmd_val(pmd) >> PAGE_SHIFT))
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table.. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)	\
	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
#define pte_offset_map_nested(dir, addr)	\
	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))

#define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte)	kunmap_atomic(pte, KM_PTE1)

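/*
 * Putting the walk together -- an illustrative sketch only, with the
 * pmd_none()/pmd_bad() checks a real walker would need omitted:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd = pmd_offset(pud_offset(pgd, addr), addr);
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	...
 *	pte_unmap(pte);
 *
 * The pud/pmd levels are folded by pgtable-nopmd.h, so on this
 * two-level layout they collapse back onto the pgd entry.
 */
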
/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit, the _PAGE_FILE bit, or the
 * _PAGE_HASHPTE bit (if used).  -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })

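/*
 * Swap PTE layout implied by the macros above (a sketch for the 32-bit
 * PTE case):
 *
 *	bits  0-2 : kept clear, so _PAGE_PRESENT, _PAGE_FILE and
 *		    _PAGE_HASHPTE (which live among the low bits) can
 *		    never appear in a swap PTE
 *	bits  3-7 : swap type   (__swp_type() masks with 0x1f)
 *	bits  8-31: swap offset (__swp_offset() shifts down by 5)
 *
 * __pte_to_swp_entry() shifts right by 3 to skip the reserved low bits.
 */
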
/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte) >> 3)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 3) | _PAGE_FILE })

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
		      pmd_t **pmdp);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_PPC32_H */