/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/sysctl.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec,
		.extra1		= &page_table_allocate_pgste_min,
		.extra2		= &page_table_allocate_pgste_max,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */

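/*
 * CRST (region and segment) tables are 16KB on s390: 2048 entries of
 * 8 bytes each, i.e. an order-2 page allocation.
 */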
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}

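/*
 * Run on each CPU after an upgrade: CPUs currently executing on the
 * upgraded mm reload the user ASCE so that address translation starts
 * from the new top-level table.
 */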
static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm) {
		clear_user_asce();
		set_user_asce(mm);
	}
	__tlb_flush_local();
}

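/*
 * Grow the address space from 2GB (segment table) to 4TB (region-third
 * table) or 8PB (region-second table) by stacking a new top-level CRST
 * table on top of the existing one. The allocation happens outside the
 * page table lock, so two upgraders may race; the loser frees its table
 * and repeats until asce_limit is large enough.
 */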
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;
	int flush;

	BUG_ON(limit > TASK_MAX_SIZE);
	flush = 0;
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
		flush = 1;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	if (flush)
		on_each_cpu(__crst_table_upgrade, mm, 0);
	return 0;
}

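/*
 * The inverse operation: unstack top-level CRST tables until asce_limit
 * no longer exceeds the requested limit. The region entry type of the
 * current top-level table determines which level is being removed.
 */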
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
	if (current->active_mm == mm)
		set_user_asce(mm);
}

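/*
 * Lock-free helper: XOR @bits into the atomic counter and return the
 * new value. Used below to flip state bits in page->_mapcount, which
 * doubles as an allocation mask for the 2K page table fragments.
 */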
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

/*
 * page table entry allocation/free routines.
 */
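/*
 * A pte table is 256 entries * 8 bytes = 2KB, so two of them fit into
 * one 4K page. For mms without PGSTEs the page is split into two
 * fragments and page->_mapcount doubles as a state mask:
 *   bit 0/1  - lower/upper 2K fragment is allocated
 *   bit 4/5  - lower/upper 2K fragment is pending removal (RCU)
 *   value 3  - one 4K table with PGSTEs in the upper half (KVM guests)
 *   value -1 - page is unused
 * Pages with a free fragment sit on mm->context.pgtable_list.
 */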
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.list_lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_mapcount);
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_mapcount, 1U << bit);
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.list_lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_set(&page->_mapcount, 3);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
		clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_set(&page->_mapcount, 1);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.list_lock);
	}
	return table;
}

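/*
 * Immediate (non-RCU) free: only valid when no other CPU can still be
 * walking the table. If the sibling 2K fragment is still allocated,
 * only the fragment bit is cleared and the 4K page goes back on the
 * free-fragment list.
 */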
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.list_lock);
		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.list_lock);
		if (mask != 0)
			return;
	}

	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	__free_page(page);
}

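/*
 * Deferred free for tables that concurrent, lockless page table
 * walkers may still see. The low bits of the address handed to
 * tlb_remove_table() encode what is being freed:
 *   0   - a CRST table (order-2 allocation)
 *   1/2 - the lower/upper 2K fragment of a 4K page
 *   3   - a 4K page table with PGSTEs
 * __tlb_remove_table() decodes them after the grace period.
 */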
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.list_lock);
	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}

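/*
 * Called once it is safe to release the table: decode the type bits
 * stashed in the pointer and free for real. For a 2K fragment, XORing
 * mask << 4 clears the pending-removal bit; the page is only freed
 * when no fragment is allocated or pending anymore.
 */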
static void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd or pud */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
			break;
		/* fallthrough */
	case 3:		/* 4K page table with pgstes */
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
		break;
	}
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

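/*
 * Flush the current batch: hand it to RCU so the tables are reclaimed
 * only after a grace period, once all lockless walkers that might
 * still reference them are done.
 */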
void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

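/*
 * Queue a table for deferred freeing. If no batch page can be
 * allocated, fall back to tlb_remove_table_one(), which synchronizes
 * with all CPUs by IPI instead of waiting for an RCU grace period.
 */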
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);
}
)