sparc32: drop swapper_pg_dir
arch/sparc/include/asm/pgtable_32.h
#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/* asm/pgtable.h:  Defines and functions used to work
 * with Sparc page tables.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/const.h>

#ifndef __ASSEMBLY__
#include <asm-generic/4level-fixup.h>

#include <linux/spinlock.h>
#include <linux/swap.h>
#include <asm/types.h>
#include <asm/pgtsrmmu.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/cpu_type.h>

struct vm_area_struct;
struct page;

extern void load_mmu(void);
extern unsigned long calc_highpages(void);

#define pte_ERROR(e)	__builtin_trap()
#define pmd_ERROR(e)	__builtin_trap()
#define pgd_ERROR(e)	__builtin_trap()

#define PMD_SHIFT		22
#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PMD_ALIGN(__addr)	(((__addr) + ~PMD_MASK) & PMD_MASK)
#define PGDIR_SHIFT		SRMMU_PGDIR_SHIFT
#define PGDIR_SIZE		SRMMU_PGDIR_SIZE
#define PGDIR_MASK		SRMMU_PGDIR_MASK
#define PTRS_PER_PTE		1024
#define PTRS_PER_PMD		SRMMU_PTRS_PER_PMD
#define PTRS_PER_PGD		SRMMU_PTRS_PER_PGD
#define USER_PTRS_PER_PGD	(PAGE_OFFSET / SRMMU_PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0
#define PTE_SIZE		(PTRS_PER_PTE*4)

#define PAGE_NONE	SRMMU_PAGE_NONE
#define PAGE_SHARED	SRMMU_PAGE_SHARED
#define PAGE_COPY	SRMMU_PAGE_COPY
#define PAGE_READONLY	SRMMU_PAGE_RDONLY
#define PAGE_KERNEL	SRMMU_PAGE_KERNEL

/* Top-level page directory - dummy used by init_mm.
 * srmmu.c will assign the real one (which is dynamically sized). */
#define swapper_pg_dir NULL

extern void paging_init(void);

extern unsigned long ptr_in_current_pgd;

/* xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED

extern int num_contexts;

/* The first physical page can be anywhere; the following is needed so that
 * va-->pa and pa-->va conversions work properly without a performance
 * hit for all __pa()/__va() operations.
 */
extern unsigned long phys_base;
extern unsigned long pfn_base;

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t * __bad_pagetable(void);
extern pte_t __bad_page(void);
extern unsigned long empty_zero_page;

#define BAD_PAGETABLE	__bad_pagetable()
#define BAD_PAGE	__bad_page()
#define ZERO_PAGE(vaddr)	(virt_to_page(&empty_zero_page))

/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction.  This ensures the mmu and the cpu are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
	__asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr));
	return value;
}

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}

#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

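/* SRMMU page table entries store a 36-bit physical address shifted
 * right by four bits, leaving the low byte free for the type and
 * access bits.  The top nibble of an entry is therefore the I/O space
 * number; a non-zero value marks device memory rather than RAM.
 * Illustration (addresses assumed, not from this file): physical
 * 0x012345000 is stored as 0x01234500, and 0xe1234500 would be the
 * same offset within I/O space 0xe.
 */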
static inline int srmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();
	return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	if (srmmu_device_memory(pgd_val(pgd))) {
		return ~0;
	} else {
		unsigned long v = pgd_val(pgd) & SRMMU_PTD_PMASK;
		return (unsigned long)__nocache_va(v << 4);
	}
}

static inline int pte_present(pte_t pte)
{
	return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
}

static inline int pte_none(pte_t pte)
{
	return !pte_val(pte);
}

static inline void __pte_clear(pte_t *ptep)
{
	set_pte(ptep, __pte(0));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	__pte_clear(ptep);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pmd_present(pmd_t pmd)
{
	return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline int pmd_none(pmd_t pmd)
{
	return !pmd_val(pmd);
}

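/* A Linux page table holds PTRS_PER_PTE (1024) entries, but a hardware
 * SRMMU table holds only SRMMU_REAL_PTRS_PER_PTE of them, so one soft
 * PMD entry bundles several hardware PTD words (pmdv[]); clearing the
 * PMD means clearing each of them.
 */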
static inline void pmd_clear(pmd_t *pmdp)
{
	int i;
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
		set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
}

static inline int pgd_none(pgd_t pgd)
{
	return !(pgd_val(pgd) & 0xFFFFFFF);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pgd_present(pgd_t pgd)
{
	return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pte((pte_t *)pgdp, __pte(0));
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & SRMMU_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & SRMMU_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & SRMMU_REF;
}

/*
 * The following only work if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)
{
	return pte_val(pte) & SRMMU_FILE;
}

static inline int pte_special(pte_t pte)
{
	return 0;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_REF);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_WRITE);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_REF);
}

#define pte_mkspecial(pte)	(pte)

#define pfn_pte(pfn, prot)	mk_pte(pfn_to_page(pfn), prot)

static inline unsigned long pte_pfn(pte_t pte)
{
	if (srmmu_device_memory(pte_val(pte))) {
		/* Just return something that will cause
		 * pfn_valid() to return false.  This makes
		 * copy_one_pte() just copy the PTE over directly.
		 */
		return ~0UL;
	}
	return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
	return __pte(((page) >> 4) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}
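
/* Worked example for mk_pte_io() (addresses assumed): page 0x12345000
 * with space 0xe yields an address field of 0xe1234500, i.e. offset
 * 0x12345000 inside I/O space 0xe.
 */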

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	prot &= ~__pgprot(SRMMU_CACHE);
	return prot;
}
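
/* e.g. pgprot_noncached(PAGE_KERNEL) would yield an uncached kernel
 * mapping, as wanted for memory-mapped I/O registers.
 */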
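
/* SRMMU_CHG_MASK is assumed here to cover the page number plus the
 * referenced/modified bits, so pte_modify() preserves those and takes
 * everything else from newprot.
 */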
static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & SRMMU_CHG_MASK) |
		pgprot_val(newprot));
}

#define pgd_index(address)	((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) pgd_page_vaddr(*dir) +
		((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
}

/* Find an entry in the third-level page table.. */
pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address);

/*
 * This shortcut works on sun4m (and sun4d) because the nocache area is static.
 */
#define pte_offset_map(d, a)	pte_offset_kernel(d,a)
#define pte_unmap(pte)		do { } while (0)
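
/* Sketch of a kernel-address table walk with the helpers above
 * (assumes the address is mapped; error checking omitted):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *	if (pte_present(*pte))
 *		pfn = pte_pfn(*pte);
 */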

struct seq_file;
void mmu_info(struct seq_file *m);

/* Fault handler stuff... */
#define FAULT_CODE_PROT		0x1
#define FAULT_CODE_WRITE	0x2
#define FAULT_CODE_USER		0x4

#define update_mmu_cache(vma, address, ptep) do { } while (0)

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
		      unsigned long xva, unsigned int len);
void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);

/* Encode and de-code a swap entry */
static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) {
		(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
		| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
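
/* Assuming the SRMMU_SWP_* type and offset fields occupy disjoint
 * bits, the encoding round-trips:
 *   __swp_type(__swp_entry(type, off))   == (type & SRMMU_SWP_TYPE_MASK)
 *   __swp_offset(__swp_entry(type, off)) == (off & SRMMU_SWP_OFF_MASK)
 */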

/* file-offset-in-pte helpers */
static inline unsigned long pte_to_pgoff(pte_t pte)
{
	return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT;
}

static inline pte_t pgoff_to_pte(unsigned long pgoff)
{
	return __pte((pgoff << SRMMU_PTE_FILE_SHIFT) | SRMMU_FILE);
}

/*
 * This is made a constant because mm/fremap.c required a constant.
 */
#define PTE_FILE_MAX_BITS 24

/* MMU context management. */
struct ctx_list {
	struct ctx_list *next;
	struct ctx_list *prev;
	unsigned int ctx_number;
	struct mm_struct *ctx_mm;
};

extern struct ctx_list *ctx_list_pool;  /* Dynamically allocated */
extern struct ctx_list ctx_free;        /* Head of free list */
extern struct ctx_list ctx_used;        /* Head of used contexts list */

#define NO_CONTEXT     -1

static inline void remove_from_ctx_list(struct ctx_list *entry)
{
	entry->next->prev = entry->prev;
	entry->prev->next = entry->next;
}

static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
{
	entry->next = head;
	(entry->prev = head->prev)->next = entry;
	head->prev = entry;
}
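
/* ctx_free and ctx_used are sentinel heads of circular doubly-linked
 * lists: add_to_ctx_list() links the entry in just before the head,
 * i.e. at the tail of the list.
 */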
#define add_to_free_ctxlist(entry)	add_to_ctx_list(&ctx_free, entry)
#define add_to_used_ctxlist(entry)	add_to_ctx_list(&ctx_used, entry)

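/* On sun4m/sun4d, masking off the low byte of a probed PTE and
 * shifting left by four undoes the phys >> 4 encoding: __get_phys()
 * returns the full 36-bit physical page address, and __get_iospace()
 * returns the top nibble, i.e. the I/O space number.
 */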
static inline unsigned long
__get_phys (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
	default:
		return 0;
	}
}

static inline int
__get_iospace (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return (srmmu_get_pte (addr) >> 28);
	default:
		return -1;
	}
}

extern unsigned long *sparc_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) \
	(test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))

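/* kern_addr_valid() above tests one bit per megabyte of physical
 * memory (__pa(addr) >> 20), so validity is tracked at 1 MB
 * granularity.
 */
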
/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffUL)

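/* Worked example on 32-bit (values assumed):
 *   MK_IOSPACE_PFN(0xe, 0x123) == 0xe0000123
 *   GET_IOSPACE(0xe0000123)    == 0xe
 *   GET_PFN(0xe0000123)        == 0x123
 */
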
extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
			   unsigned long, pgprot_t);

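/* The encoded pfn is widened back to a full 36-bit physical address
 * (I/O space in bits 35..32) and re-encoded as a pfn for
 * remap_pfn_range().
 */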
static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long long offset, space, phys_base;

	offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
	space = GET_IOSPACE(pfn);
	phys_base = offset | (space << 32ULL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({ \
	int __changed = !pte_same(*(__ptep), __entry); \
	if (__changed) { \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
		flush_tlb_page(__vma, __address); \
	} \
	__changed; \
})

#include <asm-generic/pgtable.h>

#endif /* !(__ASSEMBLY__) */

#define VMALLOC_START	_AC(0xfe600000,UL)
#define VMALLOC_END	_AC(0xffc00000,UL)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* !(_SPARC_PGTABLE_H) */