powerpc/mm: Merge various PTE bits and accessors definitions
arch/powerpc/include/asm/pgtable-ppc32.h

#ifndef _ASM_POWERPC_PGTABLE_PPC32_H
#define _ASM_POWERPC_PGTABLE_PPC32_H

#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/io.h>			/* For sub-arch specific PPC_PIN_SIZE */

extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
extern unsigned long ioremap_bot, ioremap_base;

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
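
/*
 * Worked example (illustrative, assuming the classic layout with 4K
 * pages): PAGE_SHIFT = 12 and PTE_SHIFT = 10 (1024 4-byte PTEs per
 * page), so PGDIR_SHIFT = 22, PGDIR_SIZE = 4MB and PTRS_PER_PGD =
 * 1 << (32 - 22) = 1024.  With 64-bit PTEs, PTE_SHIFT = 9 (512 8-byte
 * PTEs per page), so PGDIR_SHIFT = 21 and the pgdir grows to
 * 1 << (32 - 21) = 2048 four-byte entries, i.e. the 8KB pgdir
 * described above.
 */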

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_SHIFT)
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << (32 - PGDIR_SHIFT))
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_SHIFT)
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 16MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from ioremap_base being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings setup in the early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END	ioremap_bot
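
/*
 * Illustrative arithmetic (an assumed example, without PPC_PIN_SIZE):
 * with high_memory at, say, 0xc8000000, VMALLOC_START =
 * (0xc8000000 + 0x1000000) & ~0xffffff = 0xc9000000, i.e. the vmalloc
 * area starts 16MB above the top of lowmem, rounded to a 16MB boundary.
 */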

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)
#include <asm/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/pte-fsl-booke.h>
#elif defined(CONFIG_8xx)
#include <asm/pte-8xx.h>
#else /* CONFIG_6xx */
#include <asm/pte-hash32.h>
#endif

/* And here we include common definitions */
#include <asm/pte-common.h>

#ifndef __ASSEMBLY__

#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
#define pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)

/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

/*
 * PTE updates.  This function is called whenever an existing
 * valid PTE is updated.  This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bits wide.  In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long old = pte_val(*p);
	*p = __pte((old & ~clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long long old = pte_val(*p);
	*p = __pte((old & ~(unsigned long long)clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif /* CONFIG_PTE_64BIT */
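
/*
 * Illustrative sketch, not part of the original header: callers use
 * pte_update() to atomically clear and/or set bits in a live PTE and
 * may examine the returned previous value.  The helper name below is
 * hypothetical; it marks a page clean and old in one atomic step.
 */
static inline void example_pte_mkclean_old(pte_t *ptep)
{
	/* Clear dirty + accessed; the old PTE value is discarded here */
	pte_update(ptep, _PAGE_DIRTY | _PAGE_ACCESSED, 0);
}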

/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, so we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
#if _PAGE_HASHPTE != 0
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(context, addr, ptephys, 1);
	}
#endif
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}

static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW |
		 _PAGE_HWEXEC | _PAGE_EXEC);
	pte_update(ptep, 0, bits);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	(mem_map + (pmd_val(pmd) >> PAGE_SHIFT))
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table.. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)		\
	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
#define pte_offset_map_nested(dir, addr)	\
	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))

#define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte)	kunmap_atomic(pte, KM_PTE1)

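/*
 * Illustrative sketch, not part of the original header: walking from a
 * kernel virtual address to its PTE with the macros above.  On this
 * two-level layout the pud and pmd levels are folded (see
 * asm-generic/pgtable-nopmd.h), so pud_offset()/pmd_offset() simply
 * recast the pgd entry.  The function name is hypothetical.
 */
static inline pte_t *example_walk_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud = pud_offset(pgd, addr);	/* folded level */
	pmd_t *pmd = pmd_offset(pud, addr);	/* folded level */

	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}
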
/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit, the _PAGE_FILE bit, or the
 * _PAGE_HASHPTE bit (if used).  -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
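
/*
 * Worked example (illustrative): __swp_entry(2, 0x100) packs type 2
 * into the low 5 bits and offset 0x100 above them, giving val =
 * 2 | (0x100 << 5) = 0x2002; __swp_type() and __swp_offset() recover
 * 2 and 0x100.  The << 3 / >> 3 in the pte conversions keeps the three
 * low-order PTE bits clear, where bits such as _PAGE_PRESENT live on
 * these platforms.
 */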

/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte) >> 3)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 3) | _PAGE_FILE })

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
		      pmd_t **pmdp);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_PPC32_H */