/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>

/*
 * VMALLOC and SPARSEMEM_VMEMMAP ranges.
 *
 * VMEMMAP_SIZE: allows the whole linear region to be covered by a struct page array
 *	(rounded up to PUD_SIZE).
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space,
 *	fixed mappings and modules
 */
#define VMEMMAP_SIZE	ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)

#define VMALLOC_START		(MODULES_END)
#define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

#define VMEMMAP_START		(VMALLOC_END + SZ_64K)
#define vmemmap			((struct page *)VMEMMAP_START - \
				 SECTION_ALIGN_DOWN(memstart_addr >> PAGE_SHIFT))
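
/*
 * Illustrative sizing only (editorial note, not part of the original header):
 * assuming VA_BITS == 48, 4K pages (PAGE_SHIFT == 12) and a 64-byte
 * struct page, VMEMMAP_SIZE works out as
 *
 *	(1UL << (48 - 12)) * 64 = 2^36 pages * 2^6 bytes = 2^42 bytes = 4 TiB
 *
 * which is already a multiple of PUD_SIZE (1 GiB). VMALLOC_END then sits
 * VMEMMAP_SIZE + PUD_SIZE + 64K below PAGE_OFFSET, and the vmemmap array
 * itself starts 64K above VMALLOC_END.
 */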

#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__

#include <asm/fixmap.h>
#include <linux/mmdebug.h>

extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pud_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	virt_to_page(empty_zero_page)

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)

#define pfn_pte(pfn,prot)	(__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_user(pte)		(!!(pte_val(pte) & PTE_USER))

#ifdef CONFIG_ARM64_HW_AFDBM
#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#else
#define pte_hw_dirty(pte)	(0)
#endif
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
#define pte_valid_young(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))

/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_WRITE));
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_WRITE));
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_DIRTY));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DIRTY));
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

struct mm_struct;
struct vm_area_struct;

extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 *   Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *     0       0     |      1          0          0
 *     0       1     |      1          1          0
 *     1       0     |      1          0          1
 *     1       1     |      0          1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(pte)) {
		if (pte_sw_dirty(pte) && pte_write(pte))
			pte_val(pte) &= ~PTE_RDONLY;
		else
			pte_val(pte) |= PTE_RDONLY;
		if (pte_user(pte) && pte_exec(pte) && !pte_special(pte))
			__sync_icache_dcache(pte, addr);
	}

	/*
	 * If the existing pte is valid, check for potential race with
	 * hardware updates of the pte (ptep_set_access_flags safely changes
	 * valid ptes without going through an invalid entry).
	 */
	if (IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
	    pte_valid(*ptep) && pte_valid(pte)) {
		VM_WARN_ONCE(!pte_young(pte),
			     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(*ptep), pte_val(pte));
		VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
			     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(*ptep), pte_val(pte));
	}

	set_pte(ptep, pte);
}
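
/*
 * Worked example (editorial note, not from the original header): take a
 * clean, writable pte from the table above, i.e. PTE_RDONLY=1, PTE_WRITE=1,
 * PTE_DIRTY=0. Then pte_sw_dirty() is 0 and pte_hw_dirty() is
 * pte_write() && !PTE_RDONLY == 1 && !1 == 0, so pte_dirty() reports clean.
 * On the first store, hardware DBM clears PTE_RDONLY, making pte_hw_dirty()
 * (and hence pte_dirty()) true without a page fault being taken.
 */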

/*
 * Huge pte definitions.
 */
#define pte_huge(pte)		(!(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
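
/*
 * Illustrative values only (editorial note, not part of the original header):
 * with 4K pages PMD_SHIFT is 21, so HPAGE_SIZE is 2 MiB and
 * HUGETLB_PAGE_ORDER is 21 - 12 = 9; with 64K pages PMD_SHIFT is 29,
 * giving 512 MiB huge pages of order 29 - 16 = 13.
 */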

#define __HAVE_ARCH_PTE_SPECIAL

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_sect_prot(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~PTE_TABLE_BIT);
}

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_TYPE_MASK))

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#define pmd_pfn(pmd)		(((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_write(pud)		pte_write(pud_pte(pud))
#define pud_pfn(pud)		(((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

static inline int has_transparent_hugepage(void)
{
	return 1;
}

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & 2))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
#define pud_sect(pud)		(0)
#define pud_table(pud)		(1)
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
	dsb(ishst);
	isb();
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK;
}

/* Find an entry in the third-level page table. */
#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_phys(dir,addr)	(pmd_page_paddr(*(dir)) + pte_index(addr) * sizeof(pte_t))
#define pte_offset_kernel(dir,addr)	((pte_t *)__va(pte_offset_phys((dir), (addr))))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)
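
/*
 * Illustrative only (editorial note, not from the original header): with 4K
 * pages and 48-bit VAs, a virtual address is decoded as
 *
 *	bits [11:0]	page offset
 *	bits [20:12]	pte_index()	(512 entries per table)
 *	bits [29:21]	pmd_index()
 *	bits [38:30]	pud_index()
 *	bits [47:39]	pgd_index()
 *
 * so pte_offset_phys() simply adds pte_index(addr) * sizeof(pte_t) to the
 * table address extracted from the pmd entry.
 */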

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & 2))
#define pud_present(pud)	(pud_val(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
	dsb(ishst);
	isb();
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK;
}

/* Find an entry in the second-level page table. */
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

#define pmd_offset_phys(dir, addr)	(pud_page_paddr(*(dir)) + pmd_index(addr) * sizeof(pmd_t))
#define pmd_offset(dir, addr)		((pmd_t *)__va(pmd_offset_phys((dir), (addr))))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)		pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })

/* Match pmd_offset folding in <asm-generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(pud)		__pud_error(__FILE__, __LINE__, pud_val(pud))

#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		(!(pgd_val(pgd) & 2))
#define pgd_present(pgd)	(pgd_val(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pgd;
	dsb(ishst);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
{
	return pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK;
}

/* Find an entry in the first-level page table. */
#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

#define pud_offset_phys(dir, addr)	(pgd_page_paddr(*(dir)) + pud_index(addr) * sizeof(pud_t))
#define pud_offset(dir, addr)		((pud_t *)__va(pud_offset_phys((dir), (addr))))

#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(pgd, addr)	pud_set_fixmap(pud_offset_phys(pgd, addr))
#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)

#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0;})

/* Match pud_offset folding in <asm-generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif	/* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset_raw(pgd, addr)	((pgd) + pgd_index(addr))

#define pgd_offset(mm, addr)	(pgd_offset_raw((mm)->pgd, (addr)))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#ifdef CONFIG_ARM64_HW_AFDBM
/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	pteval_t pteval;
	unsigned int tmp, res;

	asm volatile("//	ptep_test_and_clear_young\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	ubfx	%w3, %w0, %5, #1	// extract PTE_AF (young)\n"
	"	and	%0, %0, %4		// clear PTE_AF\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)), "=&r" (res)
	: "L" (~PTE_AF), "I" (ilog2(PTE_AF)));

	return res;
}
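
/*
 * Editorial sketch of the LL/SC loop above (C-style pseudocode only,
 * ignoring the exclusive-monitor semantics that ldxr/stxr provide):
 *
 *	do {
 *		old   = *ptep;			// ldxr
 *		young = !!(old & PTE_AF);	// ubfx
 *		new   = old & ~PTE_AF;		// and
 *	} while (store_exclusive(ptep, new));	// stxr/cbnz: retry on failure
 *	return young;
 */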

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pteval_t old_pteval;
	unsigned int tmp;

	asm volatile("//	ptep_get_and_clear\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	stxr	%w1, xzr, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)));

	return __pte(old_pteval);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pteval_t pteval;
	unsigned long tmp;

	asm volatile("//	ptep_set_wrprotect\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	tst	%0, %4			// check for hw dirty (!PTE_RDONLY)\n"
	"	csel	%1, %3, xzr, eq		// set PTE_DIRTY|PTE_RDONLY if dirty\n"
	"	orr	%0, %0, %1		// if !dirty, PTE_RDONLY is already set\n"
	"	and	%0, %0, %5		// clear PTE_WRITE/PTE_DBM\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
	: "r" (PTE_DIRTY|PTE_RDONLY), "L" (PTE_RDONLY), "L" (~PTE_WRITE)
	: "cc");
}
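
/*
 * Worked example (editorial note, not from the original header): a pte that
 * hardware has dirtied has PTE_RDONLY=0 and PTE_WRITE(DBM)=1. The sequence
 * above turns it into PTE_RDONLY=1, PTE_WRITE=0, PTE_DIRTY=1, so the dirty
 * state survives write-protection in the software bit. A clean pte
 * (PTE_RDONLY already set) only has PTE_WRITE cleared.
 */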

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}
#endif
#endif	/* CONFIG_ARM64_HW_AFDBM */

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 *	bit  58:	PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
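
/*
 * Worked example (editorial note, not from the original header): for swap
 * type 1 and offset 0x1234, __swp_entry(1, 0x1234) packs
 * (1 << 2) | (0x1234 << 8) = 0x123404. Decoding it,
 * __swp_type() = (0x123404 >> 2) & 0x3f = 1 and
 * __swp_offset() = (0x123404 >> 8) & __SWP_OFFSET_MASK = 0x1234, with
 * bits 0-1 and bit 58 left clear so the pte is neither valid nor PROT_NONE.
 */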

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

extern int kern_addr_valid(unsigned long addr);

#include <asm-generic/pgtable.h>

void pgd_cache_init(void);
#define pgtable_cache_init	pgd_cache_init

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#define kc_vaddr_to_offset(v)	((v) & ~VA_START)
#define kc_offset_to_vaddr(o)	((o) | VA_START)

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */