/* arch/x86/include/asm/pgtable-2level.h */
#ifndef _ASM_X86_PGTABLE_2LEVEL_H
#define _ASM_X86_PGTABLE_2LEVEL_H

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx\n", __FILE__, __LINE__, (e).pte_low)
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

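/*
 * With 2-level paging a PTE is a single 32-bit word, so the plain
 * aligned store above is already atomic and the _atomic variant needs
 * nothing extra. (Contrast the PAE case in pgtable-3level.h, where a
 * 64-bit PTE has to be written in two halves in a careful order, or
 * via set_64bit()/cmpxchg8b.)
 */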
static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_pmd_clear(pmd_t *pmdp)
{
	native_set_pmd(pmdp, __pmd(0));
}

static inline void native_pte_clear(struct mm_struct *mm,
				    unsigned long addr, pte_t *xp)
{
	*xp = native_make_pte(0);
}

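/*
 * On SMP the entry has to be cleared with an atomic xchg: the CPU may
 * set the Accessed or Dirty bit in a live page table at any moment, and
 * a separate read followed by a write could lose such an update. On UP
 * the cheaper non-atomic *_local_* variant is sufficient.
 */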
#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
	return __pte(xchg(&xp->pte_low, 0));
}
#else
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif

#ifdef CONFIG_SMP
static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
	return __pmd(xchg((pmdval_t *)xp, 0));
}
#else
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif

/* Bit manipulation helper on pte/pgoff entry */
static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshift,
				      unsigned long mask, unsigned int leftshift)
{
	return ((value >> rightshift) & mask) << leftshift;
}
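
/*
 * pte_bitop() extracts the field of 'mask' width starting at bit
 * 'rightshift' and re-places it at bit 'leftshift'; for example,
 * pte_bitop(0xff0, 4, 0xf, 8) == 0xf00. With mask == -1UL it simply
 * moves everything from 'rightshift' upwards. The helpers below use it
 * in both directions, to pack and to unpack.
 */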

/*
 * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are
 * taken, so the 29 bits of offset have to be split up around them.
 */
#define PTE_FILE_MAX_BITS	29
#define PTE_FILE_SHIFT1		(_PAGE_BIT_PRESENT + 1)
#define PTE_FILE_SHIFT2		(_PAGE_BIT_FILE + 1)
#define PTE_FILE_SHIFT3		(_PAGE_BIT_PROTNONE + 1)
#define PTE_FILE_BITS1		(PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1)
#define PTE_FILE_BITS2		(PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)

#define PTE_FILE_MASK1		((1U << PTE_FILE_BITS1) - 1)
#define PTE_FILE_MASK2		((1U << PTE_FILE_BITS2) - 1)

#define PTE_FILE_LSHIFT2	(PTE_FILE_BITS1)
#define PTE_FILE_LSHIFT3	(PTE_FILE_BITS1 + PTE_FILE_BITS2)
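
/*
 * For instance, assuming the usual i386 bit numbers (_PAGE_BIT_PRESENT
 * = 0, _PAGE_BIT_FILE = 6, _PAGE_BIT_PROTNONE = 8), this works out to
 * SHIFT1/2/3 = 1/7/9 and BITS1/2 = 5/1: the 29 offset bits are stored
 * in pte[1..5], pte[7] and pte[9..31], skipping the three taken bits.
 */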

static __always_inline pgoff_t pte_to_pgoff(pte_t pte)
{
	return (pgoff_t)
		(pte_bitop(pte.pte_low, PTE_FILE_SHIFT1, PTE_FILE_MASK1, 0) +
		 pte_bitop(pte.pte_low, PTE_FILE_SHIFT2, PTE_FILE_MASK2, PTE_FILE_LSHIFT2) +
		 pte_bitop(pte.pte_low, PTE_FILE_SHIFT3, -1UL, PTE_FILE_LSHIFT3));
}

static __always_inline pte_t pgoff_to_pte(pgoff_t off)
{
	return (pte_t){
		.pte_low =
			pte_bitop(off, 0, PTE_FILE_MASK1, PTE_FILE_SHIFT1) +
			pte_bitop(off, PTE_FILE_LSHIFT2, PTE_FILE_MASK2, PTE_FILE_SHIFT2) +
			pte_bitop(off, PTE_FILE_LSHIFT3, -1UL, PTE_FILE_SHIFT3) +
			_PAGE_FILE,
	};
}
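
/*
 * pgoff_to_pte() and pte_to_pgoff() are exact inverses. The resulting
 * pte has _PAGE_PRESENT clear, so any access faults, and _PAGE_FILE
 * set, which is how the fault path tells a nonlinear file pte apart
 * from a swap entry (which has _PAGE_FILE clear).
 */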

/* Encode and de-code a swap entry */
#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(x)			(((x).val >> (_PAGE_BIT_PRESENT + 1)) \
					 & ((1U << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)			((x).val >> SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
					 ((type) << (_PAGE_BIT_PRESENT + 1)) \
					 | ((offset) << SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_low })
#define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })
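
/*
 * With the bit numbers assumed above this gives SWP_TYPE_BITS = 5 and
 * SWP_OFFSET_SHIFT = 9: the swap type occupies pte[1..5] and the swap
 * offset pte[9..31]. For example, __swp_entry(2, 0x30) encodes as
 * (2 << 1) | (0x30 << 9) == 0x6004.
 */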

#endif /* _ASM_X86_PGTABLE_2LEVEL_H */