#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H
#ifdef __KERNEL__

#ifndef __ASSEMBLY__
#include <asm/processor.h>              /* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

struct mm_struct;

#ifdef CONFIG_DEBUG_VM
extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
#else /* CONFIG_DEBUG_VM */
static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
}
#endif /* !CONFIG_DEBUG_VM */

#endif /* !__ASSEMBLY__ */

#if defined(CONFIG_PPC64)
#  include <asm/pgtable-ppc64.h>
#else
#  include <asm/pgtable-ppc32.h>
#endif

#ifndef __ASSEMBLY__

/* Insert a PTE: the top-level function is out of line. It uses an inline
 * low-level function in the respective pgtable-* files.
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
                       pte_t pte);

/* This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now, but
 * I'm keeping it in one place rather than spreading it around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
        /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use
         * the helper pte_update() which does an atomic update. We need to do
         * that because a concurrent invalidation can clear _PAGE_HASHPTE. If
         * it's a per-CPU PTE such as a kmap_atomic, we do a simple update
         * preserving the hash bits instead (i.e., same as the non-SMP case).
         */
        if (percpu)
                *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
                              | (pte_val(pte) & ~_PAGE_HASHPTE));
        else
                pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
56 | ||
57 | #elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) | |
58 | /* Second case is 32-bit with 64-bit PTE in SMP mode. In this case, we | |
59 | * can just store as long as we do the two halves in the right order | |
60 | * with a barrier in between. This is possible because we take care, | |
61 | * in the hash code, to pre-invalidate if the PTE was already hashed, | |
62 | * which synchronizes us with any concurrent invalidation. | |
63 | * In the percpu case, we also fallback to the simple update preserving | |
64 | * the hash bits | |
65 | */ | |
66 | if (percpu) { | |
67 | *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) | |
68 | | (pte_val(pte) & ~_PAGE_HASHPTE)); | |
69 | return; | |
70 | } | |
71 | #if _PAGE_HASHPTE != 0 | |
72 | if (pte_val(*ptep) & _PAGE_HASHPTE) | |
73 | flush_hash_entry(mm, ptep, addr); | |
74 | #endif | |
75 | __asm__ __volatile__("\ | |
76 | stw%U0%X0 %2,%0\n\ | |
77 | eieio\n\ | |
78 | stw%U0%X0 %L2,%1" | |
79 | : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) | |
80 | : "r" (pte) : "memory"); | |
81 | ||
82 | #elif defined(CONFIG_PPC_STD_MMU_32) | |
83 | /* Third case is 32-bit hash table in UP mode, we need to preserve | |
84 | * the _PAGE_HASHPTE bit since we may not have invalidated the previous | |
85 | * translation in the hash yet (done in a subsequent flush_tlb_xxx()) | |
86 | * and see we need to keep track that this PTE needs invalidating | |
87 | */ | |
88 | *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) | |
89 | | (pte_val(pte) & ~_PAGE_HASHPTE)); | |
90 | ||
91 | #else | |
92 | /* Anything else just stores the PTE normally. That covers all 64-bit | |
93 | * cases, and 32-bit non-hash with 64-bit PTEs in UP mode | |
94 | */ | |
95 | *ptep = pte; | |
96 | #endif | |
97 | } | |
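
/* A hedged usage sketch (names borrowed from the 32-bit highmem code,
 * not defined in this header): a kmap_atomic-style per-CPU mapping
 * passes percpu = 1 so that only the hash-tracking bits are preserved
 * and no atomic update or hash flush is attempted:
 *
 *      __set_pte_at(&init_mm, vaddr, kmap_pte - idx,
 *                   mk_pte(page, kmap_prot), 1);
 *
 * Everything that lives in a regular page table goes through
 * set_pte_at() instead, which uses percpu = 0.
 */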
98 | ||
99 | ||
100 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS | |
101 | extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, | |
102 | pte_t *ptep, pte_t entry, int dirty); | |
103 | ||
/*
 * Macros to mark a page protection value as "uncacheable", or to
 * select one of the other supported cacheability attributes.
 */
#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
                         _PAGE_WRITETHRU)

#define pgprot_noncached(prot)    (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
                                            _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
                                            _PAGE_NO_CACHE))

#define pgprot_cached(prot)       (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
                                            _PAGE_COHERENT))

#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
                                            _PAGE_COHERENT | _PAGE_WRITETHRU))

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address. Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test. What should we do here?
 */
#define kern_addr_valid(addr)   (1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
        remap_pfn_range(vma, vaddr, pfn, size, prot)

#include <asm-generic/pgtable.h>


/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGTABLE_H */