powerpc/mm/thp: Abstraction for THP functions
arch/powerpc/include/asm/book3s/64/radix.h
#ifndef _ASM_POWERPC_PGTABLE_RADIX_H
#define _ASM_POWERPC_PGTABLE_RADIX_H

#ifndef __ASSEMBLY__
#include <asm/cmpxchg.h>
#endif

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/radix-64k.h>
#else
#include <asm/book3s/64/radix-4k.h>
#endif

/* An empty PTE can still have an R or C writeback */
#define RADIX_PTE_NONE_MASK	(_PAGE_DIRTY | _PAGE_ACCESSED)

/* Bits to set in an RPMD/RPUD/RPGD */
#define RADIX_PMD_VAL_BITS	(0x8000000000000000UL | RADIX_PTE_INDEX_SIZE)
#define RADIX_PUD_VAL_BITS	(0x8000000000000000UL | RADIX_PMD_INDEX_SIZE)
#define RADIX_PGD_VAL_BITS	(0x8000000000000000UL | RADIX_PUD_INDEX_SIZE)
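/*
 * Each of the VAL_BITS values above sets the top (valid) bit and ORs in
 * the index size of the next-level table (presumably consumed as the
 * next-level-size field of a radix page-directory entry).
 */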

/* Don't have anything in the reserved bits and leaf bits */
#define RADIX_PMD_BAD_BITS	0x60000000000000e0UL
#define RADIX_PUD_BAD_BITS	0x60000000000000e0UL
#define RADIX_PGD_BAD_BITS	0x60000000000000e0UL

/*
 * Size of EA range mapped by our pagetables.
 */
#define RADIX_PGTABLE_EADDR_SIZE (RADIX_PTE_INDEX_SIZE + RADIX_PMD_INDEX_SIZE + \
				  RADIX_PUD_INDEX_SIZE + RADIX_PGD_INDEX_SIZE + PAGE_SHIFT)
#define RADIX_PGTABLE_RANGE (ASM_CONST(1) << RADIX_PGTABLE_EADDR_SIZE)
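/*
 * For example, assuming the radix-4k.h index sizes are PTE = 9, PMD = 9,
 * PUD = 9 and PGD = 13, this works out to 9 + 9 + 9 + 13 + 12 = 52 bits
 * of effective address, i.e. RADIX_PGTABLE_RANGE = 2^52. The radix-64k.h
 * sizes (5/9/9/13, with a 16-bit PAGE_SHIFT) give the same 52-bit range.
 */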

/*
 * We support a 52-bit address space. The top bit of it is used for the
 * kernel virtual mapping, and the kernel must also fit in the top
 * quadrant.
 *
 *           +------------------+
 *           +------------------+  Kernel virtual map (0xc008000000000000)
 *           |                  |
 *           |                  |
 *           |                  |
 * 0b11......+------------------+  Kernel linear map (0xc....)
 *           |                  |
 *           |     2 quadrant   |
 *           |                  |
 * 0b10......+------------------+
 *           |                  |
 *           |     1 quadrant   |
 *           |                  |
 * 0b01......+------------------+
 *           |                  |
 *           |     0 quadrant   |
 *           |                  |
 * 0b00......+------------------+
 *
 *
 * 3rd quadrant expanded:
 * +------------------------------+
 * |                              |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel IO map end (0xc010000000000000)
 * |                              |
 * |                              |
 * |      1/2 of virtual map      |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel IO map start
 * |                              |
 * |      1/4 of virtual map      |
 * |                              |
 * +------------------------------+  Kernel vmemmap start
 * |                              |
 * |      1/4 of virtual map      |
 * |                              |
 * +------------------------------+  Kernel virt start (0xc008000000000000)
 * |                              |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel linear (0xc.....)
 */

#define RADIX_KERN_VIRT_START	ASM_CONST(0xc008000000000000)
#define RADIX_KERN_VIRT_SIZE	ASM_CONST(0x0008000000000000)

/*
 * The vmalloc space starts at the beginning of that region, and
 * occupies a quarter of it on radix config.
 * (we keep a quarter for the virtual memmap)
 */
#define RADIX_VMALLOC_START	RADIX_KERN_VIRT_START
#define RADIX_VMALLOC_SIZE	(RADIX_KERN_VIRT_SIZE >> 2)
#define RADIX_VMALLOC_END	(RADIX_VMALLOC_START + RADIX_VMALLOC_SIZE)
/*
 * Defines the address of the vmemmap area, in its own region,
 * directly above the vmalloc space.
 */
#define RADIX_VMEMMAP_BASE	(RADIX_VMALLOC_END)
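/*
 * With the constants above this works out numerically to:
 *   RADIX_VMALLOC_START = 0xc008000000000000
 *   RADIX_VMALLOC_SIZE  = 0x0008000000000000 >> 2 = 0x0002000000000000
 *   RADIX_VMALLOC_END   = 0xc00a000000000000
 *   RADIX_VMEMMAP_BASE  = 0xc00a000000000000
 */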

#ifndef __ASSEMBLY__
#define RADIX_PTE_TABLE_SIZE	(sizeof(pte_t) << RADIX_PTE_INDEX_SIZE)
#define RADIX_PMD_TABLE_SIZE	(sizeof(pmd_t) << RADIX_PMD_INDEX_SIZE)
#define RADIX_PUD_TABLE_SIZE	(sizeof(pud_t) << RADIX_PUD_INDEX_SIZE)
#define RADIX_PGD_TABLE_SIZE	(sizeof(pgd_t) << RADIX_PGD_INDEX_SIZE)

static inline unsigned long radix__pte_update(struct mm_struct *mm,
					      unsigned long addr,
					      pte_t *ptep, unsigned long clr,
					      unsigned long set,
					      int huge)
{
	pte_t pte;
	unsigned long old_pte, new_pte;

	do {
		pte = READ_ONCE(*ptep);
		old_pte = pte_val(pte);
		new_pte = (old_pte | set) & ~clr;

	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));

	/* We already do a sync in cmpxchg, is ptesync needed? */
	asm volatile("ptesync" : : : "memory");
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	return old_pte;
}
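
/*
 * Example usage (a hypothetical helper, not part of the original header):
 * tear down a PTE by clearing every bit via radix__pte_update() and hand
 * back the old value.
 */
static inline pte_t radix__example_ptep_get_and_clear(struct mm_struct *mm,
						      unsigned long addr,
						      pte_t *ptep)
{
	/* Clear all bits; radix__pte_update() returns the previous raw PTE. */
	unsigned long old_pte = radix__pte_update(mm, addr, ptep, ~0UL, 0, 0);

	return __pte(old_pte);
}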

/*
 * Set the dirty and/or accessed bits atomically in a linux PTE; this
 * function doesn't need to invalidate the TLB.
 */
static inline void radix__ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	pte_t pte;
	unsigned long old_pte, new_pte;
	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
					      _PAGE_RW | _PAGE_EXEC);
	do {
		pte = READ_ONCE(*ptep);
		old_pte = pte_val(pte);
		new_pte = old_pte | set;

	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));

	/* We already do a sync in cmpxchg, is ptesync needed? */
	asm volatile("ptesync" : : : "memory");
}

static inline int radix__pte_same(pte_t pte_a, pte_t pte_b)
{
	return ((pte_raw(pte_a) ^ pte_raw(pte_b)) == 0);
}

static inline int radix__pte_none(pte_t pte)
{
	return (pte_val(pte) & ~RADIX_PTE_NONE_MASK) == 0;
}

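/*
 * Store the new PTE, then order the update with a ptesync before any
 * subsequent access.
 */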
static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte, int percpu)
{
	*ptep = pte;
	asm volatile("ptesync" : : : "memory");
}

static inline int radix__pmd_bad(pmd_t pmd)
{
	return !!(pmd_val(pmd) & RADIX_PMD_BAD_BITS);
}

static inline int radix__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return ((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) == 0);
}

static inline int radix__pud_bad(pud_t pud)
{
	return !!(pud_val(pud) & RADIX_PUD_BAD_BITS);
}

static inline int radix__pgd_bad(pgd_t pgd)
{
	return !!(pgd_val(pgd) & RADIX_PGD_BAD_BITS);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

static inline int radix__pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PTE);
}

#endif
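
/*
 * The radix__ prefix lets the common book3s/64 code pick a hash or radix
 * implementation at runtime. A sketch of the expected caller side (an
 * illustration assuming radix_enabled() and a hash__pmd_trans_huge()
 * counterpart exist; the real dispatch lives in book3s/64/pgtable.h):
 *
 *	static inline int pmd_trans_huge(pmd_t pmd)
 *	{
 *		if (radix_enabled())
 *			return radix__pmd_trans_huge(pmd);
 *		return hash__pmd_trans_huge(pmd);
 *	}
 */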

extern int __meminit radix__vmemmap_create_mapping(unsigned long start,
						    unsigned long page_size,
						    unsigned long phys);
extern void radix__vmemmap_remove_mapping(unsigned long start,
					   unsigned long page_size);

extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
				  pgprot_t flags, unsigned int psz);
#endif /* __ASSEMBLY__ */
#endif