#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/*  asm/pgtable.h:  Defines and functions used to work
 *                  with Sparc page tables.
 *
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/const.h>

#ifndef __ASSEMBLY__
#include <asm-generic/4level-fixup.h>

#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/types.h>
#include <asm/pgtsrmmu.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/cpu_type.h>

struct vm_area_struct;

unsigned long calc_highpages(void);
unsigned long __init bootmem_init(unsigned long *pages_avail);

#define pte_ERROR(e)	__builtin_trap()
#define pmd_ERROR(e)	__builtin_trap()
#define pgd_ERROR(e)	__builtin_trap()

#define PMD_SHIFT		22
#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PMD_ALIGN(__addr)	(((__addr) + ~PMD_MASK) & PMD_MASK)
#define PGDIR_SHIFT		SRMMU_PGDIR_SHIFT
#define PGDIR_SIZE		SRMMU_PGDIR_SIZE
#define PGDIR_MASK		SRMMU_PGDIR_MASK
#define PTRS_PER_PTE		1024
#define PTRS_PER_PMD		SRMMU_PTRS_PER_PMD
#define PTRS_PER_PGD		SRMMU_PTRS_PER_PGD
#define USER_PTRS_PER_PGD	PAGE_OFFSET / SRMMU_PGDIR_SIZE
#define FIRST_USER_ADDRESS	0UL
#define PTE_SIZE		(PTRS_PER_PTE*4)

#define PAGE_NONE	SRMMU_PAGE_NONE
#define PAGE_SHARED	SRMMU_PAGE_SHARED
#define PAGE_COPY	SRMMU_PAGE_COPY
#define PAGE_READONLY	SRMMU_PAGE_RDONLY
#define PAGE_KERNEL	SRMMU_PAGE_KERNEL

/* Top-level page directory - dummy used by init-mm.
 * srmmu.c will assign the real one (which is dynamically sized). */
#define swapper_pg_dir NULL

void paging_init(void);

extern unsigned long ptr_in_current_pgd;

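/* The protection-map index bits below are, MSB to LSB: execute, write,
 * read (xwr). */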
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED

/* The first physical page can be anywhere; the following is needed so that
 * va-->pa and vice versa conversions work properly without a performance
 * hit for all __pa()/__va() operations.
 */
extern unsigned long phys_base;
extern unsigned long pfn_base;

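/* A sketch (the authoritative definitions live in asm/page.h, not here)
 * of how the linear conversions are expected to use phys_base:
 *
 *	__pa(vaddr) == (unsigned long)(vaddr) - PAGE_OFFSET + phys_base
 *	__va(paddr) == (void *)((unsigned long)(paddr) - phys_base + PAGE_OFFSET)
 */
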
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page;

#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))

/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction.  This ensures the mmu and the cpu are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
	/* Atomically exchange *addr with value; the old *addr is returned. */
	__asm__ __volatile__("swap [%2], %0" :
			"=&r" (value) : "0" (value), "r" (addr) : "memory");
	return value;
}

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}

#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

/* A non-zero iospace in the top nibble of an entry marks device memory. */
static inline int srmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();
	return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	if (srmmu_device_memory(pgd_val(pgd))) {
		return ~0;
	} else {
		unsigned long v = pgd_val(pgd) & SRMMU_PTD_PMASK;
		return (unsigned long)__nocache_va(v << 4);
	}
}

static inline int pte_present(pte_t pte)
{
	return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
}

static inline int pte_none(pte_t pte)
{
	return !pte_val(pte);
}

static inline void __pte_clear(pte_t *ptep)
{
	set_pte(ptep, __pte(0));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	__pte_clear(ptep);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pmd_present(pmd_t pmd)
{
	return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline int pmd_none(pmd_t pmd)
{
	return !pmd_val(pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	int i;

	/* One Linux PMD entry is backed by several hardware page tables;
	 * clear every hardware descriptor it holds. */
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
		set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
}

static inline int pgd_none(pgd_t pgd)
{
	return !(pgd_val(pgd) & 0xFFFFFFF);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pgd_present(pgd_t pgd)
{
	return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pte((pte_t *)pgdp, __pte(0));
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & SRMMU_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & SRMMU_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & SRMMU_REF;
}

static inline int pte_special(pte_t pte)
{
	return 0;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_REF);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_WRITE);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_REF);
}

#define pte_mkspecial(pte)	(pte)

#define pfn_pte(pfn, prot)	mk_pte(pfn_to_page(pfn), prot)

static inline unsigned long pte_pfn(pte_t pte)
{
	if (srmmu_device_memory(pte_val(pte))) {
		/* Just return something that will cause
		 * pfn_valid() to return false.  This makes
		 * copy_one_pte() just directly copy the
		 * PTE over.
		 */
		return ~0UL;
	}
	return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
	return __pte(((page) >> 4) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}

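/*
 * Illustration only, not an additional API: an SRMMU pte holds the
 * physical address shifted right by 4, so an uncached I/O mapping for
 * physical address `pa' in iospace `sp' could be built as
 *
 *	pte_t pte = mk_pte_io(pa, pgprot_noncached(PAGE_KERNEL), sp);
 *
 * which lands `sp' in pte bits 31:28, exactly what srmmu_device_memory()
 * tests for.
 */
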
#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	prot &= ~__pgprot(SRMMU_CACHE);
	return prot;
}

static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & SRMMU_CHG_MASK) |
		pgprot_val(newprot));
}

#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *) pgd_page_vaddr(*dir) +
		((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
}

/* Find an entry in the third-level page table.. */
pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address);

/*
 * This shortcut works on sun4m (and sun4d) because the nocache area is static.
 */
#define pte_offset_map(d, a)	pte_offset_kernel(d,a)
#define pte_unmap(pte)		do{}while(0)

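/*
 * Illustration only: resolving a kernel virtual address down to a pfn
 * with the helpers above.  Real walkers must also hold the appropriate
 * page-table locks.
 *
 *	pgd_t *pgd = pgd_offset_k(vaddr);
 *	if (!pgd_none(*pgd)) {
 *		pmd_t *pmd = pmd_offset(pgd, vaddr);
 *		if (!pmd_none(*pmd)) {
 *			pte_t *pte = pte_offset_kernel(pmd, vaddr);
 *			if (pte_present(*pte))
 *				pfn = pte_pfn(*pte);
 *		}
 *	}
 */
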
struct seq_file;
void mmu_info(struct seq_file *m);

/* Fault handler stuff... */
#define FAULT_CODE_PROT		0x1
#define FAULT_CODE_WRITE	0x2
#define FAULT_CODE_USER		0x4

#define update_mmu_cache(vma, address, ptep) do { } while (0)

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
		      unsigned long xva, unsigned int len);
void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);

/* Encode and de-code a swap entry */
static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) {
		(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
		| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

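/*
 * Illustration only: because the type and offset fields are disjoint,
 * the encode/decode helpers round-trip for any in-range values:
 *
 *	swp_entry_t e = __swp_entry(type, offset);
 *	BUG_ON(__swp_type(e) != type || __swp_offset(e) != offset);
 */
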
static inline unsigned long
__get_phys (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
	default:
		return 0;
	}
}

static inline int
__get_iospace (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return (srmmu_get_pte (addr) >> 28);
	default:
		return -1;
	}
}

extern unsigned long *sparc_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) \
	(test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))

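/* The >>20 above means the bitmap keeps one validity bit per 1MB of
 * physical address space. */
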
/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffUL)

int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
		    unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long long offset, space, phys_base;

	/* Rebuild the full physical address: iospace in bits 35:32,
	 * the real pfn below it. */
	offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
	space = GET_IOSPACE(pfn);
	phys_base = offset | (space << 32ULL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range

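/*
 * Illustration only (hypothetical driver code, not part of this header):
 * a driver's mmap handler encodes the iospace into the pfn first, e.g.
 *
 *	unsigned long pfn = MK_IOSPACE_PFN(space, paddr >> PAGE_SHIFT);
 *
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				  vma->vm_end - vma->vm_start,
 *				  pgprot_noncached(vma->vm_page_prot));
 */
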
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({									  \
	int __changed = !pte_same(*(__ptep), __entry);			  \
	if (__changed) {						  \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
		flush_tlb_page(__vma, __address);			  \
	}								  \
	__changed;							  \
})

#include <asm-generic/pgtable.h>

#endif /* !(__ASSEMBLY__) */

#define VMALLOC_START		_AC(0xfe600000,UL)
#define VMALLOC_END		_AC(0xffc00000,UL)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* !(_SPARC_PGTABLE_H) */