/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;

	if (limit < (1UL << 31)) {
		limit = (1UL << 31) - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < (1UL << 42)) {
		limit = (1UL << 42) - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < (1UL << 53)) {
		limit = (1UL << 53) - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
	spin_lock_init(&gmap->guest_table_lock);
	gmap->mm = mm;
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		goto out_free;
	page->index = 0;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	down_write(&mm->mmap_sem);
	list_add(&gmap->list, &mm->context.gmap_list);
	up_write(&mm->mmap_sem);
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);

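/*
 * Illustrative usage sketch (not part of this file): in the KVM s390 code
 * the guest address space is typically created once per VM, roughly along
 * these lines. The "mem_limit" name and the kvm back pointer assignment
 * below are assumptions made for the example.
 *
 *	struct gmap *gmap;
 *
 *	gmap = gmap_alloc(current->mm, mem_limit);
 *	if (!gmap)
 *		return -ENOMEM;
 *	gmap->private = kvm;		// back pointer for notifier callbacks
 *	...
 *	gmap_free(gmap);		// on VM destruction
 */
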
static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(gmap->asce);
	else
		__tlb_flush_global();
}

static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	/* Flush tlb. */
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(gmap->asce);
	else
		__tlb_flush_global();

	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, 2);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);
	down_write(&gmap->mm->mmap_sem);
	list_del(&gmap->list);
	up_write(&gmap->mm->mmap_sem);
	kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

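/*
 * Illustrative usage sketch (not part of this file): gmap_enable() and
 * gmap_disable() are typically bracketed around the time a vcpu may run,
 * e.g. from the KVM vcpu load/put callbacks. The vcpu->arch.gmap field
 * name below reflects common KVM usage but is an assumption here.
 *
 *	void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 *	{
 *		...
 *		gmap_enable(vcpu->arch.gmap);
 *	}
 *
 *	void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 *	{
 *		gmap_disable(vcpu->arch.gmap);
 *		...
 *	}
 */
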
/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	spin_lock(&gmap->mm->page_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->mm->page_table_lock);
	if (page)
		__free_pages(page, 2);
	return 0;
}

/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset, mask;

	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	page = virt_to_page((void *)((unsigned long) entry & mask));
	return page->index + offset;
}

/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_INVALID);
		*entry = _SEGMENT_ENTRY_INVALID;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}

/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len - 1 > TASK_MAX_SIZE || to + len - 1 > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Remove old translation */
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		/* Store new translation */
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);

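/*
 * Illustrative usage sketch (not part of this file): a KVM memory slot is
 * typically wired into the guest address space with a single
 * gmap_map_segment() call and torn down again with gmap_unmap_segment();
 * the slot field names below are assumptions for the example.
 *
 *	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
 *			      mem->guest_phys_addr, mem->memory_size);
 *	if (rc)
 *		return rc;
 *	...
 *	gmap_unmap_segment(kvm->arch.gmap, mem->guest_phys_addr,
 *			   mem->memory_size);
 */
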
/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(gmap, gaddr);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);

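/*
 * Illustrative usage sketch (not part of this file): callers that only need
 * the host address behind a guest location, e.g. to access guest memory
 * from the kernel, can do something like the following.
 *
 *	unsigned long vmaddr;
 *
 *	vmaddr = gmap_translate(gmap, gaddr);
 *	if (IS_ERR_VALUE(vmaddr))
 *		return -EFAULT;		// no segment mapped at gaddr
 *	// vmaddr is now the corresponding address in the parent mm
 */
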
/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
		 unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	list_for_each_entry(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
}

/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int rc;

	/* Create higher level tables in the gmap page table */
	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr >> 53) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & 0xffe0000000000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr >> 42) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & 0xfffffc0000000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr >> 31) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & 0xffffffff80000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	table += (gaddr >> 20) & 0x7ff;
	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	/* large puds cannot yet be handled */
	if (pud_large(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* large pmds cannot yet be handled */
	if (pmd_large(*pmd))
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_INVALID) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc)
			*table = pmd_val(*pmd);
	} else
		rc = 0;
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}

/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	unsigned long vmaddr;
	int rc;
	bool unlocked;

	down_read(&gmap->mm->mmap_sem);

retry:
	unlocked = false;
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
			     &unlocked)) {
		rc = -EFAULT;
		goto out_up;
	}
	/*
	 * In the case that fixup_user_fault unlocked the mmap_sem during
	 * the fault-in, redo __gmap_translate to not race with a
	 * map/unmap_segment.
	 */
	if (unlocked)
		goto retry;

	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);

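/*
 * Illustrative usage sketch (not part of this file): a hypervisor fault
 * handler typically resolves a guest page fault by passing the faulting
 * guest address to gmap_fault(); the field names below reflect common KVM
 * usage but are assumptions for the example.
 *
 *	rc = gmap_fault(vcpu->arch.gmap, current->thread.gmap_addr,
 *			write_fault ? FAULT_FLAG_WRITE : 0);
 *	if (rc == -EFAULT)
 *		// inject an addressing exception into the guest
 *	else if (rc == -ENOMEM)
 *		// back off and retry, or fail the VM
 */
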
/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;

	/* Find the vm address for the guest address */
	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (vmaddr) {
		vmaddr |= gaddr & ~PMD_MASK;
		/* Get pointer to the page table entry */
		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
		if (ptep)
			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
		pte_unmap_unlock(ptep, ptl);
	}
}
EXPORT_SYMBOL_GPL(__gmap_zap);

void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	down_read(&gmap->mm->mmap_sem);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the vm address for the guest address */
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size, NULL);
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);

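/*
 * Illustrative usage sketch (not part of this file): gmap_discard() is the
 * kind of helper a "release pages" hypercall handler (e.g. DIAG 0x10 style
 * page release) would use to drop a range of guest pages; the vcpu field
 * name is an assumption for the example.
 *
 *	gmap_discard(vcpu->arch.gmap, start_gaddr, end_gaddr);
 */
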
static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_ipte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);

/**
 * gmap_unregister_ipte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_init(&nb->list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);

/**
 * gmap_ipte_notify - mark a range of ptes for invalidation notification
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 *
 * Returns 0 if for each page in the given range a gmap mapping exists and
 * the invalidation notification could be set. If the gmap mapping is missing
 * for one or more pages -EFAULT is returned. If no memory could be allocated
 * -ENOMEM is returned. This function establishes missing page table entries.
 */
int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
{
	unsigned long addr;
	spinlock_t *ptl;
	pte_t *ptep;
	bool unlocked;
	int rc = 0;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK))
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	while (len) {
		unlocked = false;
		/* Convert gmap address and connect the page tables */
		addr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(addr)) {
			rc = addr;
			break;
		}
		/* Get the page mapped */
		if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE,
				     &unlocked)) {
			rc = -EFAULT;
			break;
		}
		/* While trying to map mmap_sem got unlocked. Let us retry */
		if (unlocked)
			continue;
		rc = __gmap_link(gmap, gaddr, addr);
		if (rc)
			break;
		/* Walk the process page table, lock and get pte pointer */
		ptep = get_locked_pte(gmap->mm, addr, &ptl);
		VM_BUG_ON(!ptep);
		/* Set notification bit in the pgste of the pte */
		if ((pte_val(*ptep) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
			ptep_set_notify(gmap->mm, addr, ptep);
			gaddr += PAGE_SIZE;
			len -= PAGE_SIZE;
		}
		pte_unmap_unlock(ptep, ptl);
	}
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_ipte_notify);

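/*
 * Illustrative usage sketch (not part of this file): a consumer registers a
 * notifier once and then arms the ranges it cares about, e.g. the pages
 * backing a mapped guest control block. The names below are hypothetical.
 *
 *	static void my_notifier_call(struct gmap *gmap, unsigned long gaddr)
 *	{
 *		// react to the invalidation, e.g. kick the vcpu
 *	}
 *
 *	static struct gmap_notifier my_notifier = {
 *		.notifier_call = my_notifier_call,
 *	};
 *
 *	gmap_register_ipte_notifier(&my_notifier);
 *	rc = gmap_ipte_notify(gmap, gaddr, PAGE_SIZE);
 */
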
/**
 * ptep_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 * @pte: pointer to the page table entry
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void ptep_notify(struct mm_struct *mm, unsigned long vmaddr, pte_t *pte)
{
	unsigned long offset, gaddr;
	unsigned long *table;
	struct gmap_notifier *nb;
	struct gmap *gmap;

	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	offset = offset * (4096 / sizeof(pte_t));
	spin_lock(&gmap_notifier_lock);
	list_for_each_entry(gmap, &mm->context.gmap_list, list) {
		table = radix_tree_lookup(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (!table)
			continue;
		gaddr = __gmap_segment_gaddr(table) + offset;
		list_for_each_entry(nb, &gmap_notifier_list, list)
			nb->notifier_call(gmap, gaddr);
	}
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(ptep_notify);

static inline void thp_split_mm(struct mm_struct *mm)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct vm_area_struct *vma;
	unsigned long addr;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE)
			follow_page(vma, addr, FOLL_SPLIT);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
	}
	mm->def_flags |= VM_NOHUGEPAGE;
#endif
}

/*
 * switch on pgstes for its userspace process (for kvm)
 */
int s390_enable_sie(void)
{
	struct mm_struct *mm = current->mm;

	/* Do we have pgstes? if yes, we are done */
	if (mm_has_pgste(mm))
		return 0;
	/* Fail if the page tables are 2K */
	if (!mm_alloc_pgste(mm))
		return -EINVAL;
	down_write(&mm->mmap_sem);
	mm->context.has_pgste = 1;
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	up_write(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

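/*
 * Illustrative usage sketch (not part of this file): KVM enables the
 * extended page-table mode once, early during VM creation, before any gmap
 * is allocated for the guest. The "mem_limit" name is an assumption.
 *
 *	rc = s390_enable_sie();
 *	if (rc)
 *		return rc;
 *	kvm->arch.gmap = gmap_alloc(current->mm, mem_limit);
 */
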
/*
 * Enable storage key handling from now on and initialize the storage
 * keys with the default key.
 */
static int __s390_enable_skey(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	/*
	 * Remove all zero page mappings; after establishing a policy to
	 * forbid zero page mappings, subsequent faults on those pages will
	 * get fresh anonymous pages.
	 */
	if (is_zero_pfn(pte_pfn(*pte)))
		ptep_xchg_direct(walk->mm, addr, pte, __pte(_PAGE_INVALID));
	/* Clear storage key */
	ptep_zap_key(walk->mm, addr, pte);
	return 0;
}

int s390_enable_skey(void)
{
	struct mm_walk walk = { .pte_entry = __s390_enable_skey };
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc = 0;

	down_write(&mm->mmap_sem);
	if (mm_use_skey(mm))
		goto out_up;

	mm->context.use_skey = 1;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
				MADV_UNMERGEABLE, &vma->vm_flags)) {
			mm->context.use_skey = 0;
			rc = -ENOMEM;
			goto out_up;
		}
	}
	mm->def_flags &= ~VM_MERGEABLE;

	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);

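/*
 * Illustrative usage sketch (not part of this file): callers typically
 * enable storage keys lazily, the first time the guest actually uses a
 * storage-key instruction (e.g. ISKE/SSKE/RRBE emulation):
 *
 *	rc = s390_enable_skey();
 *	if (rc)
 *		return rc;
 *	// from now on storage-key operations on guest pages are honoured
 */
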
/*
 * Reset CMMA state, make all pages stable again.
 */
static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	ptep_zap_unused(walk->mm, addr, pte, 1);
	return 0;
}

void s390_reset_cmma(struct mm_struct *mm)
{
	struct mm_walk walk = { .pte_entry = __s390_reset_cmma };

	down_write(&mm->mmap_sem);
	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
	up_write(&mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);