/*
 * arch/s390/mm/pgtable.c
 *
 * Copyright IBM Corp. 2007,2011
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define FRAG_MASK	0x0f
#else
#define ALLOC_ORDER	2
#define FRAG_MASK	0x03
#endif
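
/*
 * Allocation and freeing of crst (region/segment) tables. A crst table
 * has 2048 entries, so ALLOC_ORDER selects four pages (64 bit) or two
 * pages (31 bit) per table.
 */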
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}

#ifdef CONFIG_64BIT
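
/*
 * crst_table_upgrade/crst_table_downgrade add or remove region table
 * levels above the current top-level table, one level per iteration.
 * The address space limit moves between 2GB (segment table only),
 * 4TB (region-3) and 8PB (region-2).
 */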
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;

	BUG_ON(limit > (1UL << 53));
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	update_mm(mm, current);
	return 0;
}

void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (mm->context.asce_limit <= limit)
		return;
	__tlb_flush_mm(mm);
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
	update_mm(mm, current);
}
#endif

#ifdef CONFIG_PGSTE
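
/*
 * A gmap provides a guest address space on top of a parent mm. Guest
 * addresses are translated through a private four-level tree: bit
 * positions 53, 42 and 31 of a guest address index the region tables
 * and bit position 20 the segment table, 11 bits (0x7ff) per level.
 *
 * Typical use by a hypervisor (hypothetical sketch, not taken from
 * this file):
 *
 *	gmap = gmap_alloc(mm);
 *	rc = gmap_map_segment(gmap, from, to, len);
 *	gmap_enable(gmap);
 *	... resolve guest faults with gmap_fault() ...
 *	gmap_disable(gmap);
 *	gmap_free(gmap);
 */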
/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;

	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	gmap->mm = mm;
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		goto out_free;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, _REGION1_ENTRY_EMPTY);
	gmap->table = table;
	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
		     _ASCE_USER_BITS | __pa(table);
	list_add(&gmap->list, &mm->context.gmap_list);
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);
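
/*
 * gmap_unlink_segment - remove the reverse mapping for a segment table
 * entry and invalidate the entry. Returns 1 if a valid entry was
 * unlinked and the caller needs to flush, 0 otherwise.
 */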
static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	if (*table & _SEGMENT_ENTRY_INV)
		return 0;
	page = pfn_to_page(*table >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry(rmap, &mp->mapper, list) {
		if (rmap->entry != table)
			continue;
		list_del(&rmap->list);
		kfree(rmap);
		break;
	}
	*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
	return 1;
}
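
/* Flush the guest TLB for this gmap, using IDTE on the asce if available. */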
static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;
	unsigned long *table;
	int i;

	/* Flush tlb. */
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();

	/* Free all segment & region tables. */
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
		table = (unsigned long *) page_to_phys(page);
		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
			/* Remove gmap rmap structures for segment table. */
			for (i = 0; i < PTRS_PER_PMD; i++, table++)
				gmap_unlink_segment(gmap, table);
		__free_pages(page, ALLOC_ORDER);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	list_del(&gmap->list);
	kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap,
			    unsigned long *table, unsigned long init)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	spin_unlock(&gmap->mm->page_table_lock);
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	spin_lock(&gmap->mm->page_table_lock);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	if (*table & _REGION_ENTRY_INV) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
	} else
		__free_pages(page, ALLOC_ORDER);
	return 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the guest addr space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Clear segment table entry in guest address space. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INV;
	}
out:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the map succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len > PGDIR_SIZE ||
	    from + len < from || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the gmap address space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Store 'from' address in an invalid segment table entry. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;

out_unmap:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);

/*
 * this function is assumed to be called with mmap_sem held
 */
unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long *table, vmaddr, segment;
	struct mm_struct *mm;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct vm_area_struct *vma;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	current->thread.gmap_addr = address;
	mm = gmap->mm;
	/* Walk the gmap address space page table */
	table = gmap->table + ((address >> 53) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return -EFAULT;
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 42) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return -EFAULT;
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 31) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return -EFAULT;
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 20) & 0x7ff);

	/* Convert the gmap address to an mm address. */
	segment = *table;
	if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
		page = pfn_to_page(segment >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		return mp->vmaddr | (address & ~PMD_MASK);
	} else if (segment & _SEGMENT_ENTRY_RO) {
		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
		vma = find_vma(mm, vmaddr);
		if (!vma || vma->vm_start > vmaddr)
			return -EFAULT;

		/* Walk the parent mm page table */
		pgd = pgd_offset(mm, vmaddr);
		pud = pud_alloc(mm, pgd, vmaddr);
		if (!pud)
			return -ENOMEM;
		pmd = pmd_alloc(mm, pud, vmaddr);
		if (!pmd)
			return -ENOMEM;
		if (!pmd_present(*pmd) &&
		    __pte_alloc(mm, vma, pmd, vmaddr))
			return -ENOMEM;
		/* pmd now points to a valid segment table entry. */
		rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
		if (!rmap)
			return -ENOMEM;
		/* Link gmap segment table entry location to page table. */
		page = pmd_page(*pmd);
		mp = (struct gmap_pgtable *) page->index;
		rmap->entry = table;
		spin_lock(&mm->page_table_lock);
		list_add(&rmap->list, &mp->mapper);
		spin_unlock(&mm->page_table_lock);
		/* Set gmap segment table entry to page table. */
		*table = pmd_val(*pmd) & PAGE_MASK;
		return vmaddr | (address & ~PMD_MASK);
	}
	return -EFAULT;
}

unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_fault(address, gmap);
	up_read(&gmap->mm->mmap_sem);

	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
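
/**
 * gmap_discard - discard mapped guest memory in a range
 * @from: first guest address to discard
 * @to: address after the last guest address to discard
 * @gmap: pointer to the guest address space structure
 *
 * Zaps the parent-mm page range backing each mapped segment in
 * [from, to), skipping holes in the guest address space.
 */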
void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
{
	unsigned long *table, address, size;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct page *page;

	down_read(&gmap->mm->mmap_sem);
	address = from;
	while (address < to) {
		/* Walk the gmap address space page table */
		table = gmap->table + ((address >> 53) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 42) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 31) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 20) & 0x7ff);
		if (unlikely(*table & _SEGMENT_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		page = pfn_to_page(*table >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		vma = find_vma(gmap->mm, mp->vmaddr);
		size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
		zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
			       size, NULL);
		address = (address + PMD_SIZE) & PMD_MASK;
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);
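
/*
 * gmap_unmap_notifier - called when a page table is unlinked from the
 * parent mm. Invalidates every gmap segment entry that refers to this
 * page table and flushes the TLB if anything was detached.
 */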
void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
{
	struct gmap_rmap *rmap, *next;
	struct gmap_pgtable *mp;
	struct page *page;
	int flush;

	flush = 0;
	spin_lock(&mm->page_table_lock);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
		*rmap->entry =
			_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
		list_del(&rmap->list);
		kfree(rmap);
		flush = 1;
	}
	spin_unlock(&mm->page_table_lock);
	if (flush)
		__tlb_flush_global();
}
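
/*
 * With pgstes a full 4K page is used per page table: the first half
 * holds the ptes, the second half the matching pgstes. page->index
 * carries the gmap_pgtable descriptor used for reverse mapping.
 */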
static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	struct gmap_pgtable *mp;

	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
	if (!page)
		return NULL;
	mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
	if (!mp) {
		__free_page(page);
		return NULL;
	}
	pgtable_page_ctor(page);
	mp->vmaddr = vmaddr & PMD_MASK;
	INIT_LIST_HEAD(&mp->mapper);
	page->index = (unsigned long) mp;
	atomic_set(&page->_mapcount, 3);
	table = (unsigned long *) page_to_phys(page);
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	return table;
}

static inline void page_table_free_pgste(unsigned long *table)
{
	struct page *page;
	struct gmap_pgtable *mp;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	BUG_ON(!list_empty(&mp->mapper));
	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	kfree(mp);
	__free_page(page);
}

#else /* CONFIG_PGSTE */

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_unmap_notifier(struct mm_struct *mm,
				       unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */
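
/* Atomically flip the bits given in @bits in @v and return the new value. */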
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

/*
 * page table entry allocation/free routines.
 */
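/*
 * A 4K page holds two 2K page tables (64 bit) or four 1K page tables
 * (31 bit). page->_mapcount serves as an allocation bitmap: the low
 * bits (FRAG_MASK) track fragments in use, the same bits shifted up
 * by four mark fragments queued for RCU freeing.
 */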
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	unsigned int mask, bit;

	if (mm_has_pgste(mm))
		return page_table_alloc_pgste(mm, vmaddr);
	/* Allocate fragments of a 4K page as 1K/2K page table */
	spin_lock_bh(&mm->context.list_lock);
	mask = FRAG_MASK;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		table = (unsigned long *) page_to_phys(page);
		mask = atomic_read(&page->_mapcount);
		mask = mask | (mask >> 4);
	}
	if ((mask & FRAG_MASK) == FRAG_MASK) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		atomic_set(&page->_mapcount, 1);
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	} else {
		for (bit = 1; mask & bit; bit <<= 1)
			table += PTRS_PER_PTE;
		mask = atomic_xor_bits(&page->_mapcount, bit);
		if ((mask & FRAG_MASK) == FRAG_MASK)
			list_del(&page->lru);
	}
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	if (mm_has_pgste(mm)) {
		gmap_unmap_notifier(mm, table);
		return page_table_free_pgste(table);
	}
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit);
	if (mask & FRAG_MASK)
		list_add(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	if (mask == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

static void __page_table_free_rcu(void *table, unsigned bit)
{
	struct page *page;

	if (bit == FRAG_MASK)
		return page_table_free_pgste(table);
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	if (mm_has_pgste(mm)) {
		gmap_unmap_notifier(mm, table);
		table = (unsigned long *) (__pa(table) | FRAG_MASK);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
	if (mask & FRAG_MASK)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *) (__pa(table) | (bit << 4));
	tlb_remove_table(tlb, table);
}
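
/*
 * The fragment bits that page_table_free_rcu encoded into the low bits
 * of the table pointer tell __tlb_remove_table how to dispose of the
 * table once the grace period has elapsed.
 */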
void __tlb_remove_table(void *_table)
{
	const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
	void *table = (void *)((unsigned long) _table & ~mask);
	unsigned type = (unsigned long) _table & mask;

	if (type)
		__page_table_free_rcu(table, type);
	else
		free_pages((unsigned long) table, ALLOC_ORDER);
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		__tlb_flush_mm(tlb->mm);
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}
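
/*
 * Queue a page table for freeing after an RCU grace period. If no
 * batch page can be allocated, synchronize with software table walkers
 * via an IPI broadcast and free the table immediately.
 */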
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);
}

/*
 * switch on pgstes for the current userspace process (needed for kvm)
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm, *old_mm;

	/* Do we have switched amode? If not, we cannot do sie */
	if (user_mode == HOME_SPACE_MODE)
		return -EINVAL;

	/* Do we have pgstes? If yes, we are done */
	if (mm_has_pgste(tsk->mm))
		return 0;

	/* Let's check if we are allowed to replace the mm */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		task_unlock(tsk);
		return -EINVAL;
	}
	task_unlock(tsk);

	/* We copy the mm and let dup_mm create the page tables with pgstes */
	tsk->mm->context.alloc_pgste = 1;
	/* make sure that both mms have a correct rss state */
	sync_mm_rss(tsk->mm);
	mm = dup_mm(tsk);
	tsk->mm->context.alloc_pgste = 0;
	if (!mm)
		return -ENOMEM;

	/* Now let's check again if something happened */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		mmput(mm);
		task_unlock(tsk);
		return -EINVAL;
	}

	/* ok, we are alone. No ptrace, no threads, etc. */
	old_mm = tsk->mm;
	tsk->mm = tsk->active_mm = mm;
	preempt_disable();
	update_mm(mm, tsk);
	atomic_inc(&mm->context.attach_count);
	atomic_dec(&old_mm->context.attach_count);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	preempt_enable();
	task_unlock(tsk);
	mmput(old_mm);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
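/*
 * kernel_page_present - check whether a page is mapped in the kernel
 * 1:1 mapping. The lra (load real address) instruction sets condition
 * code 0 only if the address can be translated.
 */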
bool kernel_page_present(struct page *page)
{
	unsigned long addr;
	int cc;

	addr = page_to_phys(page);
	asm volatile(
		"	lra	%1,0(%1)\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (cc), "+a" (addr) : : "cc");
	return cc == 0;
}
#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */