/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

/*
 * Software defined PTE bits definition.
 */
#define PTE_VALID		(_AT(pteval_t, 1) << 0)
#define PTE_WRITE		(PTE_DBM)		 /* same as DBM (51) */
#define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
#define PTE_PROT_NONE		(_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
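
/*
 * Note that PTE_PROT_NONE (bit 58) lies outside the swap type/offset
 * fields encoded at the end of this file, so a PROT_NONE mapping
 * (PTE_VALID clear, PTE_PROT_NONE set) remains distinguishable from a
 * swap entry; pte_present() below checks both bits.
 */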

/*
 * VMALLOC and SPARSEMEM_VMEMMAP ranges.
 *
 * VMEMMAP_SIZE: allows the whole VA space to be covered by a struct page
 *	array (rounded up to PUD_SIZE).
 * VMALLOC_START: beginning of the kernel VA space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space,
 *	fixed mappings and modules
 */
#define VMEMMAP_SIZE		ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)

#ifndef CONFIG_KASAN
#define VMALLOC_START		(VA_START)
#else
#include <asm/kasan.h>
#define VMALLOC_START		(KASAN_SHADOW_END + SZ_64K)
#endif

#define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

#define vmemmap			((struct page *)(VMALLOC_END + SZ_64K))

#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__

#include <linux/mmdebug.h>

extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pud_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)

#define PROT_DEVICE_nGnRnE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
#define PROT_NORMAL_WT		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_WT))
#define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))

#define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
#define PROT_SECT_NORMAL_EXEC	(PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))

#define _PAGE_DEFAULT		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))

#define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_RO		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
#define PAGE_KERNEL_ROX		__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_EXEC_CONT	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)

#define PAGE_HYP		__pgprot(_PAGE_DEFAULT | PTE_HYP)
#define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)

#define PAGE_S2			__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
#define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)

#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
#define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
#define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
#define PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
#define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)

#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY_EXEC
#define __P101  PAGE_READONLY_EXEC
#define __P110  PAGE_COPY_EXEC
#define __P111  PAGE_COPY_EXEC

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY_EXEC
#define __S101  PAGE_READONLY_EXEC
#define __S110  PAGE_SHARED_EXEC
#define __S111  PAGE_SHARED_EXEC

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)

#define pfn_pte(pfn,prot)	(__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/* Find an entry in the third-level page table. */
#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + pte_index(addr))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)
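
/*
 * Illustrative sketch (not itself part of this header): resolving the pte
 * that maps a kernel virtual address composes the per-level helpers defined
 * in this file:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * When CONFIG_PGTABLE_LEVELS folds a level away, the generic folded
 * variants of pud_offset()/pmd_offset() take over.
 */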

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
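
/*
 * With hardware DBM (PTE_WRITE == PTE_DBM), the first write to a writable
 * page clears PTE_RDONLY in place without touching the software PTE_DIRTY
 * bit, so pte_dirty() must accept either form of dirty state.
 */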
#ifdef CONFIG_ARM64_HW_AFDBM
#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#else
#define pte_hw_dirty(pte)	(0)
#endif
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
#define pte_valid_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
#define pte_valid_young(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))

/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
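
/*
 * Generic helpers to set or clear arbitrary pgprot bits in a pte; the
 * pte_mk*() accessors below are thin wrappers around them.
 */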
static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_WRITE));
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_WRITE));
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_DIRTY));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DIRTY));
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}
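
/*
 * Install a pte in the page table. For valid kernel mappings the barriers
 * below publish the new entry to the table walker before it can be relied
 * upon; user mappings get their ordering from TLB maintenance or
 * update_mmu_cache() instead.
 */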
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

struct vm_area_struct;

extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0      |   1           0          0
 *   0      1      |   1           1          0
 *   1      0      |   1           0          1
 *   1      1      |   0           1          1
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *	PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_valid_user(pte)) {
		if (!pte_special(pte) && pte_exec(pte))
			__sync_icache_dcache(pte, addr);
		if (pte_sw_dirty(pte) && pte_write(pte))
			pte_val(pte) &= ~PTE_RDONLY;
		else
			pte_val(pte) |= PTE_RDONLY;
	}

	/*
	 * If the existing pte is valid, check for potential race with
	 * hardware updates of the pte (ptep_set_access_flags safely changes
	 * valid ptes without going through an invalid entry).
	 */
	if (IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
	    pte_valid(*ptep) && pte_valid(pte)) {
		VM_WARN_ONCE(!pte_young(pte),
			     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(*ptep), pte_val(pte));
		VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
			     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(*ptep), pte_val(pte));
	}

	set_pte(ptep, pte);
}

/*
 * Huge pte definitions.
 */
#define pte_huge(pte)		(!(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

#define __HAVE_ARCH_PTE_SPECIAL
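
/*
 * Conversion helpers: pmds and puds share the pte bit layout, so re-casting
 * them as ptes lets the pmd_*()/pud_*() accessors further down reuse the
 * pte_*() operations defined above.
 */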
static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_sect_prot(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~PTE_TABLE_BIT);
}

/*
 * THP definitions.
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#define pmd_trans_splitting(pmd)	pte_special(pmd_pte(pmd))
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
struct vm_area_struct;
void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp);
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mksplitting(pmd)	pte_pmd(pte_mkspecial(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_TYPE_MASK))

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#define pmd_pfn(pmd)		(((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_write(pud)		pte_write(pud_pte(pud))
#define pud_pfn(pud)		(((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

static inline int has_transparent_hugepage(void)
{
	return 1;
}

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
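
/*
 * The pgprot_*() variants below are all built on __pgprot_modify(): they
 * swap the memory attribute index while forcing PXN/UXN and leave the
 * remaining protection bits alone.
 */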

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & 2))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)

#ifdef CONFIG_ARM64_64K_PAGES
#define pud_sect(pud)		(0)
#define pud_table(pud)		(1)
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
	dsb(ishst);
	isb();
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & 2))
#define pud_present(pud)	(pud_val(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
	dsb(ishst);
	isb();
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline pmd_t *pud_page_vaddr(pud_t pud)
{
	return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
}

/* Find an entry in the second-level page table. */
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
}

#define pud_page(pud)		pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))

#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(pud)		__pud_error(__FILE__, __LINE__, pud_val(pud))

#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		(!(pgd_val(pgd) & 2))
#define pgd_present(pgd)	(pgd_val(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pgd;
	dsb(ishst);
	isb();
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline pud_t *pgd_page_vaddr(pgd_t pgd)
{
	return __va(pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK);
}

/* Find an entry in the first-level page table. */
#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(addr);
}

#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))

#endif  /* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset(mm, addr)	((mm)->pgd+pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
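
/*
 * pte_modify() applies a new protection to a pte: only the permission bits
 * in 'mask' below are taken from newprot, while the output address, memory
 * attribute index and any hardware-managed dirty state are preserved.
 */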
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#ifdef CONFIG_ARM64_HW_AFDBM
/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
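
/*
 * The ldxr/stxr exclusives in the helpers below make each read-modify-write
 * atomic, so a concurrent hardware update of the access flag or dirty state
 * forces a retry instead of being silently lost.
 */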
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	pteval_t pteval;
	unsigned int tmp, res;

	asm volatile("// ptep_test_and_clear_young\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	ubfx	%w3, %w0, %5, #1	// extract PTE_AF (young)\n"
	"	and	%0, %0, %4		// clear PTE_AF\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)), "=&r" (res)
	: "L" (~PTE_AF), "I" (ilog2(PTE_AF)));

	return res;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pteval_t old_pteval;
	unsigned int tmp;

	asm volatile("// ptep_get_and_clear\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	stxr	%w1, xzr, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)));

	return __pte(old_pteval);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pteval_t pteval;
	unsigned long tmp;

	asm volatile("// ptep_set_wrprotect\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	tst	%0, %4			// check for hw dirty (!PTE_RDONLY)\n"
	"	csel	%1, %3, xzr, eq		// set PTE_DIRTY|PTE_RDONLY if dirty\n"
	"	orr	%0, %0, %1		// if !dirty, PTE_RDONLY is already set\n"
	"	and	%0, %0, %5		// clear PTE_WRITE/PTE_DBM\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
	: "r" (PTE_DIRTY|PTE_RDONLY), "L" (PTE_RDONLY), "L" (~PTE_WRITE)
	: "cc");
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* CONFIG_ARM64_HW_AFDBM */

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
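
/*
 * A swap pte keeps bits 0-1 and bit 58 (PTE_PROT_NONE) clear, so
 * pte_present() reads false for it.
 */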

extern int kern_addr_valid(unsigned long addr);

#include <asm-generic/pgtable.h>

void pgd_cache_init(void);
#define pgtable_cache_init	pgd_cache_init

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#define kc_vaddr_to_offset(v)	((v) & ~VA_START)
#define kc_offset_to_vaddr(o)	((o) | VA_START)

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */