/*
 * Page table allocation functions
 *
 * Copyright IBM Corp. 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/sysctl.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

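/*
 * The vm.allocate_pgste sysctl forces page_table_alloc() to hand out
 * full 4K page tables with page table status extensions (PGSTEs)
 * instead of 2K fragments.  PGSTEs are required to run KVM guests on
 * s390, so a hypervisor process needs this enabled before its address
 * space is populated.
 */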
static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		/* use the minmax handler so extra1/extra2 clamp the value to 0..1 */
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &page_table_allocate_pgste_min,
		.extra2		= &page_table_allocate_pgste_max,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */

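/*
 * CRST (region and segment) tables on s390 are 16K: 2048 entries of
 * 8 bytes each, hence the order-2 (four page) allocation below.
 */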
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}

static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm) {
		clear_user_asce();
		set_user_asce(mm);
	}
	__tlb_flush_local();
}

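/*
 * Upgrade a task's address space from 3 page table levels (4TB,
 * region-third table on top) to 4 levels (8PB, region-second table).
 * A new top-level table is allocated, the old pgd becomes its first
 * entry via pgd_populate(), and the ASCE is rebuilt to point at the
 * new table.  The on_each_cpu() call makes every CPU currently
 * running this mm reload the user ASCE and flush its local TLB.
 */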
int crst_table_upgrade(struct mm_struct *mm)
{
	unsigned long *table, *pgd;

	/* upgrade should only happen from 3 to 4 levels */
	BUG_ON(mm->context.asce_limit != (1UL << 42));

	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;

	spin_lock_bh(&mm->page_table_lock);
	pgd = (unsigned long *) mm->pgd;
	crst_table_init(table, _REGION2_ENTRY_EMPTY);
	pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
	mm->pgd = (pgd_t *) table;
	mm->context.asce_limit = 1UL << 53;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
	mm->task_size = mm->context.asce_limit;
	spin_unlock_bh(&mm->page_table_lock);

	on_each_cpu(__crst_table_upgrade, mm, 0);
	return 0;
}

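/*
 * Downgrade a 3-level (4TB) address space to a 2-level (2GB) one for
 * 31-bit compat tasks.  Only the segment table referenced by the
 * first pgd entry is kept; the old top-level table is freed and the
 * ASCE is switched to segment-table type.
 */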
void crst_table_downgrade(struct mm_struct *mm)
{
	pgd_t *pgd;

	/* downgrade should only happen from 3 to 2 levels (compat only) */
	BUG_ON(mm->context.asce_limit != (1UL << 42));

	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}

	pgd = mm->pgd;
	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
	mm->context.asce_limit = 1UL << 31;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
	mm->task_size = mm->context.asce_limit;
	crst_table_free(mm, (unsigned long *) pgd);

	if (current->active_mm == mm)
		set_user_asce(mm);
}

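/*
 * Atomically toggle bits in page->_mapcount and return the new value.
 * The low nibble tracks which 2K halves of a page table page are
 * allocated, the high nibble marks halves that are pending a deferred
 * (RCU) free.
 */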
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

/*
 * page table entry allocation/free routines.
 */
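/*
 * An s390 page table holds 256 pte entries of 8 bytes each and so
 * occupies only 2K, half of a 4K page.  Pages that still have a free
 * 2K half are kept on mm->context.pgtable_list so the second half can
 * be handed out by a later allocation.  When the mm needs PGSTEs (for
 * KVM), the whole 4K page is used instead: 2K of ptes followed by 2K
 * of status extensions.
 */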
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.list_lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_mapcount);
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_mapcount, 1U << bit);
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.list_lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_set(&page->_mapcount, 3);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
		clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_set(&page->_mapcount, 1);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.list_lock);
	}
	return table;
}

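/*
 * Immediately free a page table; unlike page_table_free_rcu() this
 * does not defer the release, so callers must know that no other CPU
 * can still be walking the table.  A 2K fragment is returned to its
 * page, and the page itself is released once both halves are free.
 */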
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.list_lock);
		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.list_lock);
		if (mask != 0)
			return;
	}

	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	__free_page(page);
}

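/*
 * Deferred variant used while tearing down mappings under mmu_gather:
 * the fragment is marked as pending in the high nibble of _mapcount
 * and the table pointer, tagged in its low bits, is queued via
 * tlb_remove_table().  The actual release happens in
 * __tlb_remove_table() once no CPU can still be walking the table.
 */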
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.list_lock);
	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}

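/*
 * Final free of a deferred table.  The low two bits of the queued
 * pointer encode what is being freed: 0 = order-2 CRST table,
 * 1/2 = lower/upper 2K half of a page table page, 3 = full 4K page
 * table with PGSTEs.
 */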
static void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd or pud */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
			break;
		/* fallthrough */
	case 3:		/* 4K page table with pgstes */
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
		break;
	}
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

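/*
 * Hand the current batch of table pointers to RCU; they are actually
 * released in tlb_remove_table_rcu() after a grace period has elapsed.
 */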
void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

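/*
 * Queue a table for deferred freeing.  If no batch page can be
 * allocated, fall back to a synchronous smp_call_function() broadcast
 * in tlb_remove_table_one() so that no CPU can still be walking the
 * table when it is freed.
 */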
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}