Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* pgtable.h: FR-V page table mangling |
2 | * | |
3 | * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. | |
4 | * Written by David Howells (dhowells@redhat.com) | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or | |
7 | * modify it under the terms of the GNU General Public License | |
8 | * as published by the Free Software Foundation; either version | |
9 | * 2 of the License, or (at your option) any later version. | |
10 | * | |
11 | * Derived from: | |
12 | * include/asm-m68knommu/pgtable.h | |
13 | * include/asm-i386/pgtable.h | |
14 | */ | |
15 | ||
16 | #ifndef _ASM_PGTABLE_H | |
17 | #define _ASM_PGTABLE_H | |
18 | ||
1da177e4 LT |
19 | #include <asm/mem-layout.h> |
20 | #include <asm/setup.h> | |
21 | #include <asm/processor.h> | |
22 | ||
23 | #ifndef __ASSEMBLY__ | |
24 | #include <linux/threads.h> | |
25 | #include <linux/slab.h> | |
26 | #include <linux/list.h> | |
27 | #include <linux/spinlock.h> | |
95203aec | 28 | #include <linux/sched.h> |
8c65b4a6 | 29 | struct vm_area_struct; |
1da177e4 LT |
30 | #endif |
31 | ||
32 | #ifndef __ASSEMBLY__ | |
33 | #if defined(CONFIG_HIGHPTE) | |
34 | typedef unsigned long pte_addr_t; | |
35 | #else | |
36 | typedef pte_t *pte_addr_t; | |
37 | #endif | |
38 | #endif | |
39 | ||
40 | /*****************************************************************************/ | |
41 | /* | |
42 | * MMU-less operation case first | |
43 | */ | |
44 | #ifndef CONFIG_MMU | |
45 | ||
46 | #define pgd_present(pgd) (1) /* pages are always present on NO_MM */ | |
47 | #define pgd_none(pgd) (0) | |
48 | #define pgd_bad(pgd) (0) | |
49 | #define pgd_clear(pgdp) | |
50 | #define kern_addr_valid(addr) (1) | |
51 | #define pmd_offset(a, b) ((void *) 0) | |
52 | ||
53 | #define PAGE_NONE __pgprot(0) /* these mean nothing to NO_MM */ | |
54 | #define PAGE_SHARED __pgprot(0) /* these mean nothing to NO_MM */ | |
55 | #define PAGE_COPY __pgprot(0) /* these mean nothing to NO_MM */ | |
56 | #define PAGE_READONLY __pgprot(0) /* these mean nothing to NO_MM */ | |
57 | #define PAGE_KERNEL __pgprot(0) /* these mean nothing to NO_MM */ | |
58 | ||
59 | #define __swp_type(x) (0) | |
60 | #define __swp_offset(x) (0) | |
61 | #define __swp_entry(typ,off) ((swp_entry_t) { ((typ) | ((off) << 7)) }) | |
62 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) | |
63 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) | |
64 | ||
1da177e4 LT |
65 | #define ZERO_PAGE(vaddr) ({ BUG(); NULL; }) |
66 | ||
67 | #define swapper_pg_dir ((pgd_t *) NULL) | |
68 | ||
28936117 | 69 | #define pgtable_cache_init() do {} while (0) |
6fde836b DH |
70 | |
71 | #include <asm-generic/pgtable.h> | |
1da177e4 LT |
72 | |
73 | #else /* !CONFIG_MMU */ | |
74 | /*****************************************************************************/ | |
75 | /* | |
76 | * then MMU operation | |
77 | */ | |
78 | ||
79 | /* | |
80 | * ZERO_PAGE is a global shared page that is always zero: used | |
81 | * for zero-mapped memory areas etc.. | |
82 | */ | |
83 | #ifndef __ASSEMBLY__ | |
84 | extern unsigned long empty_zero_page; | |
85 | #define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page) | |
86 | #endif | |
87 | ||
88 | /* | |
89 | * we use 2-level page tables, folding the PMD (mid-level table) into the PGE (top-level entry) | |
0868ff7a | 90 | * [see Documentation/frv/mmu-layout.txt] |
1da177e4 LT |
91 | * |
92 | * Page Directory: | |
93 | * - Size: 16KB | |
94 | * - 64 PGEs per PGD | |
95 | * - Each PGE holds 1 PUD and covers 64MB | |
96 | * | |
97 | * Page Upper Directory: | |
98 | * - Size: 256B | |
99 | * - 1 PUE per PUD | |
100 | * - Each PUE holds 1 PMD and covers 64MB | |
101 | * | |
102 | * Page Mid-Level Directory | |
103 | * - Size: 256B | |
104 | * - 1 PME per PMD | |
105 | * - Each PME holds 64 STEs, all of which point to separate chunks of the same Page Table | |
106 | * - All STEs are instantiated at the same time | |
107 | * | |
108 | * Page Table | |
109 | * - Size: 16KB | |
110 | * - 4096 PTEs per PT | |
111 | * - Each Linux PT is subdivided into 64 FR451 PTs, each of which holds 64 entries | |
112 | * | |
113 | * Pages | |
114 | * - Size: 16KB | |
115 | * | |
116 | * total PTEs | |
117 | * = 1 PML4E * 64 PGEs * 1 PUEs * 1 PMEs * 4096 PTEs | |
118 | * = 1 PML4E * 64 PGEs * 64 STEs * 64 PTEs/FR451-PT | |
119 | * = 262144 (or 256 * 1024) | |
120 | */ | |
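As a quick cross-check of the arithmetic in the comment above, the following standalone sketch (not part of the header) re-derives the entry counts from the shift values defined next; it assumes FR-V's 16KB pages, i.e. a PAGE_SHIFT of 14 as defined in <asm/page.h>:

```c
#include <assert.h>

#define PGDIR_SHIFT	26
#define PAGE_SHIFT	14	/* 16KB pages -- assumption from <asm/page.h> */
#define PTRS_PER_PGD	64
#define PTRS_PER_PTE	(1 << (PGDIR_SHIFT - PAGE_SHIFT))

int main(void)
{
	assert(PTRS_PER_PTE == 4096);			/* 64MB / 16KB per PGE */
	assert((1UL << PGDIR_SHIFT) == 64UL << 20);	/* each PGE covers 64MB */
	/* 64 PGEs x 64MB span the full 4GB of 32-bit address space */
	assert((unsigned long long) PTRS_PER_PGD << PGDIR_SHIFT == 1ULL << 32);
	/* total PTEs = 64 PGEs * 4096 PTEs = 262144 */
	assert(PTRS_PER_PGD * PTRS_PER_PTE == 262144);
	return 0;
}
```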
121 | #define PGDIR_SHIFT 26 | |
122 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | |
123 | #define PGDIR_MASK (~(PGDIR_SIZE - 1)) | |
124 | #define PTRS_PER_PGD 64 | |
125 | ||
c07af4f1 | 126 | #define __PAGETABLE_PUD_FOLDED |
1da177e4 LT |
127 | #define PUD_SHIFT 26 |
128 | #define PTRS_PER_PUD 1 | |
129 | #define PUD_SIZE (1UL << PUD_SHIFT) | |
130 | #define PUD_MASK (~(PUD_SIZE - 1)) | |
131 | #define PUE_SIZE 256 | |
132 | ||
c07af4f1 | 133 | #define __PAGETABLE_PMD_FOLDED |
1da177e4 LT |
134 | #define PMD_SHIFT 26 |
135 | #define PMD_SIZE (1UL << PMD_SHIFT) | |
136 | #define PMD_MASK (~(PMD_SIZE - 1)) | |
137 | #define PTRS_PER_PMD 1 | |
138 | #define PME_SIZE 256 | |
139 | ||
140 | #define __frv_PT_SIZE 256 | |
141 | ||
142 | #define PTRS_PER_PTE 4096 | |
143 | ||
144 | #define USER_PGDS_IN_LAST_PML4 (TASK_SIZE / PGDIR_SIZE) | |
d016bf7e | 145 | #define FIRST_USER_ADDRESS 0UL |
1da177e4 LT |
146 | |
147 | #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) | |
148 | #define KERNEL_PGD_PTRS (PTRS_PER_PGD - USER_PGD_PTRS) | |
149 | ||
150 | #define TWOLEVEL_PGDIR_SHIFT 26 | |
151 | #define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT) | |
152 | #define BOOT_KERNEL_PGD_PTRS (PTRS_PER_PGD - BOOT_USER_PGD_PTRS) | |
153 | ||
154 | #ifndef __ASSEMBLY__ | |
155 | ||
156 | extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; | |
157 | ||
158 | #define pte_ERROR(e) \ | |
159 | printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte) | |
160 | #define pmd_ERROR(e) \ | |
161 | printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e)) | |
162 | #define pud_ERROR(e) \ | |
163 | printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pmd_val(pud_val(e))) | |
164 | #define pgd_ERROR(e) \ | |
165 | printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pmd_val(pud_val(pgd_val(e)))) | |
166 | ||
167 | /* | |
168 | * Certain architectures need to do special things when PTEs | |
169 | * within a page table are directly modified. Thus, the following | |
170 | * hook is made available. | |
171 | */ | |
172 | #define set_pte(pteptr, pteval) \ | |
173 | do { \ | |
174 | *(pteptr) = (pteval); \ | |
175 | asm volatile("dcf %M0" :: "U"(*pteptr)); \ | |
176 | } while(0) | |
177 | #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) | |
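A minimal usage sketch of this hook, assuming mm, addr, ptep and pteval are supplied by the caller (the function name is hypothetical): the macro performs a plain store and then issues a dcf (data cache flush) on the line holding the PTE.

```c
static inline void example_install_pte(struct mm_struct *mm,
				       unsigned long addr,
				       pte_t *ptep, pte_t pteval)
{
	/* expands to: *ptep = pteval; followed by "dcf" to flush the
	 * cache line holding the PTE, so the update is visible beyond
	 * the data cache */
	set_pte_at(mm, addr, ptep, pteval);
}
```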
178 | ||
1da177e4 LT |
179 | /* |
180 | * pgd_offset() returns a (pgd_t *) | |
181 | * pgd_index() is used to get the offset into the pgd page's array of pgd_t's; | |
182 | */ | |
183 | #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) | |
184 | ||
185 | /* | |
186 | * a shortcut which implies the use of the kernel's pgd, instead | |
187 | * of a process's | |
188 | */ | |
189 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) | |
190 | ||
191 | /* | |
192 | * The "pgd_xxx()" functions here are trivial for a folded two-level | |
193 | * setup: the pud is never bad, and a pud always exists (as it's folded | |
194 | * into the pgd entry) | |
195 | */ | |
196 | static inline int pgd_none(pgd_t pgd) { return 0; } | |
197 | static inline int pgd_bad(pgd_t pgd) { return 0; } | |
198 | static inline int pgd_present(pgd_t pgd) { return 1; } | |
199 | static inline void pgd_clear(pgd_t *pgd) { } | |
200 | ||
201 | #define pgd_populate(mm, pgd, pud) do { } while (0) | |
202 | /* | |
203 | * (puds are folded into pgds so this doesn't actually get called, | |
204 | * but the define is needed for a generic inline function.) | |
205 | */ | |
206 | #define set_pgd(pgdptr, pgdval) \ | |
207 | do { \ | |
208 | memcpy((pgdptr), &(pgdval), sizeof(pgd_t)); \ | |
209 | asm volatile("dcf %M0" :: "U"(*(pgdptr))); \ | |
210 | } while(0) | |
211 | ||
212 | static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) | |
213 | { | |
214 | return (pud_t *) pgd; | |
215 | } | |
216 | ||
217 | #define pgd_page(pgd) (pud_page((pud_t){ pgd })) | |
46a82b2d | 218 | #define pgd_page_vaddr(pgd) (pud_page_vaddr((pud_t){ pgd })) |
1da177e4 LT |
219 | |
220 | /* | |
221 | * allocating and freeing a pud is trivial: the 1-entry pud is | |
222 | * inside the pgd, so has no extra memory associated with it. | |
223 | */ | |
224 | #define pud_alloc_one(mm, address) NULL | |
5e541973 | 225 | #define pud_free(mm, x) do { } while (0) |
9e1b32ca | 226 | #define __pud_free_tlb(tlb, x, address) do { } while (0) |
1da177e4 LT |
227 | |
228 | /* | |
229 | * The "pud_xxx()" functions here are trivial for a folded two-level | |
230 | * setup: the pmd is never bad, and a pmd always exists (as it's folded | |
231 | * into the pud entry) | |
232 | */ | |
233 | static inline int pud_none(pud_t pud) { return 0; } | |
234 | static inline int pud_bad(pud_t pud) { return 0; } | |
235 | static inline int pud_present(pud_t pud) { return 1; } | |
236 | static inline void pud_clear(pud_t *pud) { } | |
237 | ||
238 | #define pud_populate(mm, pmd, pte) do { } while (0) | |
239 | ||
240 | /* | |
241 | * (pmds are folded into puds so this doesn't actually get called, | |
242 | * but the define is needed for a generic inline function.) | |
243 | */ | |
244 | #define set_pud(pudptr, pudval) set_pmd((pmd_t *)(pudptr), (pmd_t) { pudval }) | |
245 | ||
246 | #define pud_page(pud) (pmd_page((pmd_t){ pud })) | |
46a82b2d | 247 | #define pud_page_vaddr(pud) (pmd_page_vaddr((pmd_t){ pud })) |
1da177e4 LT |
248 | |
249 | /* | |
250 | * (pmds are folded into pgds so this doesn't actually get called, | |
251 | * but the define is needed for a generic inline function.) | |
252 | */ | |
253 | extern void __set_pmd(pmd_t *pmdptr, unsigned long __pmd); | |
254 | ||
255 | #define set_pmd(pmdptr, pmdval) \ | |
256 | do { \ | |
257 | __set_pmd((pmdptr), (pmdval).ste[0]); \ | |
258 | } while(0) | |
259 | ||
260 | #define __pmd_index(address) 0 | |
261 | ||
262 | static inline pmd_t *pmd_offset(pud_t *dir, unsigned long address) | |
263 | { | |
264 | return (pmd_t *) dir + __pmd_index(address); | |
265 | } | |
266 | ||
267 | #define pte_same(a, b) ((a).pte == (b).pte) | |
268 | #define pte_page(x) (mem_map + ((unsigned long)(((x).pte >> PAGE_SHIFT)))) | |
269 | #define pte_none(x) (!(x).pte) | |
270 | #define pte_pfn(x) ((unsigned long)(((x).pte >> PAGE_SHIFT))) | |
271 | #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) | |
272 | #define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) | |
273 | ||
274 | #define VMALLOC_VMADDR(x) ((unsigned long) (x)) | |
275 | ||
276 | #endif /* !__ASSEMBLY__ */ | |
277 | ||
278 | /* | |
279 | * control flags in AMPR registers and TLB entries | |
280 | */ | |
281 | #define _PAGE_BIT_PRESENT xAMPRx_V_BIT | |
282 | #define _PAGE_BIT_WP DAMPRx_WP_BIT | |
283 | #define _PAGE_BIT_NOCACHE xAMPRx_C_BIT | |
284 | #define _PAGE_BIT_SUPER xAMPRx_S_BIT | |
285 | #define _PAGE_BIT_ACCESSED xAMPRx_RESERVED8_BIT | |
286 | #define _PAGE_BIT_DIRTY xAMPRx_M_BIT | |
287 | #define _PAGE_BIT_NOTGLOBAL xAMPRx_NG_BIT | |
288 | ||
289 | #define _PAGE_PRESENT xAMPRx_V | |
290 | #define _PAGE_WP DAMPRx_WP | |
291 | #define _PAGE_NOCACHE xAMPRx_C | |
292 | #define _PAGE_SUPER xAMPRx_S | |
293 | #define _PAGE_ACCESSED xAMPRx_RESERVED8 /* accessed if set */ | |
294 | #define _PAGE_DIRTY xAMPRx_M | |
295 | #define _PAGE_NOTGLOBAL xAMPRx_NG | |
296 | ||
297 | #define _PAGE_RESERVED_MASK (xAMPRx_RESERVED8 | xAMPRx_RESERVED13) | |
298 | ||
1da177e4 LT |
299 | #define _PAGE_PROTNONE 0x000 /* If not present */ |
300 | ||
301 | #define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) | |
302 | ||
303 | #define __PGPROT_BASE \ | |
304 | (_PAGE_PRESENT | xAMPRx_SS_16Kb | xAMPRx_D | _PAGE_NOTGLOBAL | _PAGE_ACCESSED) | |
305 | ||
306 | #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) | |
307 | #define PAGE_SHARED __pgprot(__PGPROT_BASE) | |
308 | #define PAGE_COPY __pgprot(__PGPROT_BASE | _PAGE_WP) | |
309 | #define PAGE_READONLY __pgprot(__PGPROT_BASE | _PAGE_WP) | |
310 | ||
311 | #define __PAGE_KERNEL (__PGPROT_BASE | _PAGE_SUPER | _PAGE_DIRTY) | |
312 | #define __PAGE_KERNEL_NOCACHE (__PGPROT_BASE | _PAGE_SUPER | _PAGE_DIRTY | _PAGE_NOCACHE) | |
313 | #define __PAGE_KERNEL_RO (__PGPROT_BASE | _PAGE_SUPER | _PAGE_DIRTY | _PAGE_WP) | |
314 | ||
315 | #define MAKE_GLOBAL(x) __pgprot((x) & ~_PAGE_NOTGLOBAL) | |
316 | ||
317 | #define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL) | |
318 | #define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO) | |
319 | #define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE) | |
320 | ||
321 | #define _PAGE_TABLE (_PAGE_PRESENT | xAMPRx_SS_16Kb) | |
322 | ||
323 | #ifndef __ASSEMBLY__ | |
324 | ||
325 | /* | |
326 | * The FR451 can do execute protection by virtue of having separate TLB miss handlers for | |
327 | * instruction access and for data access. However, we don't have enough reserved bits to say | |
328 | * "execute only", so we don't bother. If you can read it, you can execute it and vice versa. | |
329 | */ | |
330 | #define __P000 PAGE_NONE | |
331 | #define __P001 PAGE_READONLY | |
332 | #define __P010 PAGE_COPY | |
333 | #define __P011 PAGE_COPY | |
334 | #define __P100 PAGE_READONLY | |
335 | #define __P101 PAGE_READONLY | |
336 | #define __P110 PAGE_COPY | |
337 | #define __P111 PAGE_COPY | |
338 | ||
339 | #define __S000 PAGE_NONE | |
340 | #define __S001 PAGE_READONLY | |
341 | #define __S010 PAGE_SHARED | |
342 | #define __S011 PAGE_SHARED | |
343 | #define __S100 PAGE_READONLY | |
344 | #define __S101 PAGE_READONLY | |
345 | #define __S110 PAGE_SHARED | |
346 | #define __S111 PAGE_SHARED | |
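The three digits in each name are the binary index <exec><write><read>, and the __S variants apply to MAP_SHARED mappings. As a sketch (simplified from the protection_map table in mm/mmap.c), this is roughly how the generic mm layer consumes these entries:

```c
static pgprot_t example_protection_map[16] = {
	/* private (copy-on-write) mappings */
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	/* shared mappings */
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
/* e.g. a PROT_READ|PROT_WRITE, MAP_PRIVATE mapping indexes entry 3: PAGE_COPY */
```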
347 | ||
348 | /* | |
349 | * Define this to warn about kernel memory accesses that are | |
e49332bd | 350 | * done without an 'access_ok(VERIFY_WRITE,..)'
1da177e4 | 351 | */ |
e49332bd | 352 | #undef TEST_ACCESS_OK |
1da177e4 LT |
353 | |
354 | #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) | |
355 | #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) | |
356 | ||
357 | #define pmd_none(x) (!pmd_val(x)) | |
358 | #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) | |
359 | #define pmd_bad(x) (pmd_val(x) & xAMPRx_SS) | |
360 | #define pmd_clear(xp) do { __set_pmd(xp, 0); } while(0) | |
361 | ||
46a82b2d | 362 | #define pmd_page_vaddr(pmd) \ |
1da177e4 LT |
363 | ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) |
364 | ||
365 | #ifndef CONFIG_DISCONTIGMEM | |
366 | #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) | |
367 | #endif | |
368 | ||
369 | #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) | |
370 | ||
371 | /* | |
372 | * The following only work if pte_present() is true. | |
373 | * Undefined behaviour if not.. | |
374 | */ | |
1da177e4 LT |
375 | static inline int pte_dirty(pte_t pte) { return (pte).pte & _PAGE_DIRTY; } |
376 | static inline int pte_young(pte_t pte) { return (pte).pte & _PAGE_ACCESSED; } | |
377 | static inline int pte_write(pte_t pte) { return !((pte).pte & _PAGE_WP); } | |
7e675137 | 378 | static inline int pte_special(pte_t pte) { return 0; } |
1da177e4 | 379 | |
1da177e4 LT |
380 | static inline pte_t pte_mkclean(pte_t pte) { (pte).pte &= ~_PAGE_DIRTY; return pte; } |
381 | static inline pte_t pte_mkold(pte_t pte) { (pte).pte &= ~_PAGE_ACCESSED; return pte; } | |
382 | static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte |= _PAGE_WP; return pte; } | |
1da177e4 LT |
383 | static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte |= _PAGE_DIRTY; return pte; } |
384 | static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte |= _PAGE_ACCESSED; return pte; } | |
385 | static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte &= ~_PAGE_WP; return pte; } | |
7e675137 | 386 | static inline pte_t pte_mkspecial(pte_t pte) { return pte; } |
1da177e4 | 387 | |
1da177e4 LT |
388 | static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) |
389 | { | |
390 | int i = test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep); | |
391 | asm volatile("dcf %M0" :: "U"(*ptep)); | |
392 | return i; | |
393 | } | |
394 | ||
395 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | |
396 | { | |
397 | unsigned long x = xchg(&ptep->pte, 0); | |
398 | asm volatile("dcf %M0" :: "U"(*ptep)); | |
399 | return __pte(x); | |
400 | } | |
401 | ||
402 | static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | |
403 | { | |
404 | set_bit(_PAGE_BIT_WP, ptep); | |
405 | asm volatile("dcf %M0" :: "U"(*ptep)); | |
406 | } | |
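As an illustration of these helpers, here is a hedged sketch of fork-time copy-on-write arming in the style of generic mm code (src_mm, addr and ptep are assumed to come from the caller; the function name is hypothetical):

```c
static inline pte_t example_cow_arm(struct mm_struct *src_mm,
				    unsigned long addr, pte_t *ptep)
{
	/* atomically set the WP bit and flush the PTE's cache line, so
	 * the parent's next store faults and the page can be copied */
	ptep_set_wrprotect(src_mm, addr, ptep);
	return *ptep;
}
```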
407 | ||
41be6aef DH |
408 | /* |
409 | * Macro to mark a page protection value as "uncacheable" | |
410 | */ | |
411 | #define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NOCACHE)) | |
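For example, a driver wanting an uncached kernel mapping might build its protection value like this (a hypothetical sketch):

```c
static inline pgprot_t example_mmio_prot(void)
{
	/* ORs in _PAGE_NOCACHE (xAMPRx_C) so accesses bypass the cache */
	return pgprot_noncached(PAGE_KERNEL);
}
```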
412 | ||
1da177e4 LT |
413 | /* |
414 | * Conversion functions: convert a page and protection to a page entry, | |
415 | * and a page entry and page directory to the page they refer to. | |
416 | */ | |
417 | ||
418 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) | |
419 | #define mk_pte_huge(entry) ((entry).pte_low |= _PAGE_PRESENT | _PAGE_PSE) | |
420 | ||
421 | /* This takes a physical page address that is used by the remapping functions */ | |
422 | #define mk_pte_phys(physpage, pgprot) pfn_pte((physpage) >> PAGE_SHIFT, pgprot) | |
423 | ||
424 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | |
425 | { | |
426 | pte.pte &= _PAGE_CHG_MASK; | |
427 | pte.pte |= pgprot_val(newprot); | |
428 | return pte; | |
429 | } | |
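A sketch tying the conversion helpers together (vma, page, addr and ptep are assumed caller state; the function name is hypothetical):

```c
static void example_map_then_harden(struct vm_area_struct *vma,
				    struct page *page,
				    unsigned long addr, pte_t *ptep)
{
	pte_t pte = mk_pte(page, vma->vm_page_prot);

	/* pte_modify() keeps only the pfn plus the accessed and dirty
	 * bits (_PAGE_CHG_MASK) and replaces everything else */
	pte = pte_modify(pte, PAGE_READONLY);
	set_pte_at(vma->vm_mm, addr, ptep, pte);
}
```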
430 | ||
1da177e4 LT |
431 | /* to find an entry in a page-table-directory. */ |
432 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) | |
433 | #define pgd_index_k(addr) pgd_index(addr) | |
434 | ||
435 | /* Find an entry in the bottom-level page table.. */ | |
436 | #define __pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | |
437 | ||
438 | /* | |
439 | * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE] | |
440 | * | |
441 | * this macro returns the index of the entry in the pte page which would | |
442 | * control the given virtual address | |
443 | */ | |
444 | #define pte_index(address) \ | |
445 | (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | |
446 | #define pte_offset_kernel(dir, address) \ | |
46a82b2d | 447 | ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address)) |
1da177e4 LT |
448 | |
449 | #if defined(CONFIG_HIGHPTE) | |
450 | #define pte_offset_map(dir, address) \ | |
ece0e2b6 PZ |
451 | ((pte_t *)kmap_atomic(pmd_page(*(dir))) + pte_index(address)) |
452 | #define pte_unmap(pte) kunmap_atomic(pte) | |
1da177e4 LT |
453 | #else |
454 | #define pte_offset_map(dir, address) \ | |
455 | ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address)) | |
1da177e4 | 456 | #define pte_unmap(pte) do { } while (0) |
1da177e4 LT |
457 | #endif |
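Putting the lookup helpers together, a walk from a kernel virtual address down to its PTE looks roughly like this sketch for the folded two-level layout (error handling kept minimal; the function name is hypothetical):

```c
static pte_t *example_lookup_kernel_pte(unsigned long address)
{
	pgd_t *pge = pgd_offset_k(address);	/* index the 64-entry PGD */
	pud_t *pue = pud_offset(pge, address);	/* folded: returns the pge */
	pmd_t *pme = pmd_offset(pue, address);	/* folded: returns the pue */

	if (pmd_none(*pme) || pmd_bad(*pme))
		return NULL;
	return pte_offset_kernel(pme, address);	/* one of 4096 PTEs */
}
```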
458 | ||
459 | /* | |
460 | * Handle swap and file entries | |
461 | * - the PTE is encoded in the following format: | |
462 | * bit 0: Must be 0 (!_PAGE_PRESENT) | |
ca5bfa7b KS |
463 | * bits 1-6: Swap type |
464 | * bits 7-31: Swap offset | |
1da177e4 | 465 | */ |
ca5bfa7b KS |
466 | #define __swp_type(x) (((x).val >> 1) & 0x1f) |
467 | #define __swp_offset(x) ((x).val >> 7) | |
468 | #define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 7) }) | |
2f609712 | 469 | #define __pte_to_swp_entry(_pte) ((swp_entry_t) { (_pte).pte }) |
1da177e4 LT |
470 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) |
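A round-trip sketch of this encoding (type 3 and offset 0x1234 are arbitrary example values; the function name is hypothetical):

```c
static void example_swap_roundtrip(void)
{
	swp_entry_t entry = __swp_entry(3, 0x1234);
	pte_t pte = __swp_entry_to_pte(entry);

	/* bit 0 stays clear, so pte_present() is false and any access
	 * faults, letting the kernel locate the page in swap */
	unsigned int type = __swp_type(__pte_to_swp_entry(pte));   /* == 3 */
	unsigned long off = __swp_offset(__pte_to_swp_entry(pte)); /* == 0x1234 */
	(void) type; (void) off;
}
```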
471 | ||
1da177e4 LT |
472 | /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ |
473 | #define PageSkip(page) (0) | |
474 | #define kern_addr_valid(addr) (1) | |
475 | ||
1da177e4 | 476 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG |
1da177e4 LT |
477 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR |
478 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | |
479 | #define __HAVE_ARCH_PTE_SAME | |
480 | #include <asm-generic/pgtable.h> | |
481 | ||
482 | /* | |
483 | * preload information about a newly instantiated PTE into the SCR0/SCR1 PGE cache | |
484 | */ | |
4b3073e1 | 485 | static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) |
1da177e4 | 486 | { |
a31b9dd8 | 487 | struct mm_struct *mm; |
1da177e4 | 488 | unsigned long ampr; |
1da177e4 | 489 | |
a31b9dd8 DH |
490 | mm = current->mm; |
491 | if (mm) { | |
492 | pgd_t *pge = pgd_offset(mm, address); | |
493 | pud_t *pue = pud_offset(pge, address); | |
494 | pmd_t *pme = pmd_offset(pue, address); | |
495 | ||
496 | ampr = pme->ste[0] & 0xffffff00; | |
497 | ampr |= xAMPRx_L | xAMPRx_SS_16Kb | xAMPRx_S | xAMPRx_C | | |
498 | xAMPRx_V; | |
499 | } else { | |
500 | address = ULONG_MAX; | |
501 | ampr = 0; | |
502 | } | |
1da177e4 LT |
503 | |
504 | asm volatile("movgs %0,scr0\n" | |
505 | "movgs %0,scr1\n" | |
506 | "movgs %1,dampr4\n" | |
507 | "movgs %1,dampr5\n" | |
508 | : | |
509 | : "r"(address), "r"(ampr) | |
510 | ); | |
511 | } | |
512 | ||
513 | #ifdef CONFIG_PROC_FS | |
514 | extern char *proc_pid_status_frv_cxnr(struct mm_struct *mm, char *buffer); | |
515 | #endif | |
516 | ||
517 | extern void __init pgtable_cache_init(void); | |
518 | ||
519 | #endif /* !__ASSEMBLY__ */ | |
520 | #endif /* !CONFIG_MMU */ | |
521 | ||
522 | #ifndef __ASSEMBLY__ | |
523 | extern void __init paging_init(void); | |
524 | #endif /* !__ASSEMBLY__ */ | |
525 | ||
526 | #endif /* _ASM_PGTABLE_H */ |