Commit: b46e756f (KS)

Line | Data |
---|---|
1 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
2 | ||
3 | #include <linux/mm.h> | |
4 | #include <linux/sched.h> | |
5 | #include <linux/mmu_notifier.h> | |
6 | #include <linux/rmap.h> | |
7 | #include <linux/swap.h> | |
8 | #include <linux/mm_inline.h> | |
9 | #include <linux/kthread.h> | |
10 | #include <linux/khugepaged.h> | |
11 | #include <linux/freezer.h> | |
12 | #include <linux/mman.h> | |
13 | #include <linux/hashtable.h> | |
14 | #include <linux/userfaultfd_k.h> | |
15 | #include <linux/page_idle.h> | |
16 | #include <linux/swapops.h> | |
17 | ||
18 | #include <asm/tlb.h> | |
19 | #include <asm/pgalloc.h> | |
20 | #include "internal.h" | |
21 | ||
22 | enum scan_result { | |
23 | SCAN_FAIL, | |
24 | SCAN_SUCCEED, | |
25 | SCAN_PMD_NULL, | |
26 | SCAN_EXCEED_NONE_PTE, | |
27 | SCAN_PTE_NON_PRESENT, | |
28 | SCAN_PAGE_RO, | |
29 | SCAN_NO_REFERENCED_PAGE, | |
30 | SCAN_PAGE_NULL, | |
31 | SCAN_SCAN_ABORT, | |
32 | SCAN_PAGE_COUNT, | |
33 | SCAN_PAGE_LRU, | |
34 | SCAN_PAGE_LOCK, | |
35 | SCAN_PAGE_ANON, | |
36 | SCAN_PAGE_COMPOUND, | |
37 | SCAN_ANY_PROCESS, | |
38 | SCAN_VMA_NULL, | |
39 | SCAN_VMA_CHECK, | |
40 | SCAN_ADDRESS_RANGE, | |
41 | SCAN_SWAP_CACHE_PAGE, | |
42 | SCAN_DEL_PAGE_LRU, | |
43 | SCAN_ALLOC_HUGE_PAGE_FAIL, | |
44 | SCAN_CGROUP_CHARGE_FAIL, | |
45 | SCAN_EXCEED_SWAP_PTE | |
46 | }; | |
47 | ||
48 | #define CREATE_TRACE_POINTS | |
49 | #include <trace/events/huge_memory.h> | |
50 | ||
51 | /* default scan 8*512 ptes (or vmas) every 10 seconds */ | |
52 | static unsigned int khugepaged_pages_to_scan __read_mostly; | |
53 | static unsigned int khugepaged_pages_collapsed; | |
54 | static unsigned int khugepaged_full_scans; | |
55 | static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000; | |
56 | /* during fragmentation poll the hugepage allocator once every minute */ | |
57 | static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000; | |
58 | static unsigned long khugepaged_sleep_expire; | |
59 | static DEFINE_SPINLOCK(khugepaged_mm_lock); | |
60 | static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait); | |
61 | /* | |
62 | * By default, collapse hugepages if there is at least one pte mapped, as | |
63 | * would have happened if the vma had been large enough during the page | |
64 | * fault. | |
65 | */ | |
66 | static unsigned int khugepaged_max_ptes_none __read_mostly; | |
67 | static unsigned int khugepaged_max_ptes_swap __read_mostly; | |
68 | ||
69 | #define MM_SLOTS_HASH_BITS 10 | |
70 | static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS); | |
71 | ||
72 | static struct kmem_cache *mm_slot_cache __read_mostly; | |
73 | ||
74 | /** | |
75 | * struct mm_slot - hash lookup from mm to mm_slot | |
76 | * @hash: hash collision list | |
77 | * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head | |
78 | * @mm: the mm that this information is valid for | |
79 | */ | |
80 | struct mm_slot { | |
81 | struct hlist_node hash; | |
82 | struct list_head mm_node; | |
83 | struct mm_struct *mm; | |
84 | }; | |
85 | ||
86 | /** | |
87 | * struct khugepaged_scan - cursor for scanning | |
88 | * @mm_head: the head of the mm list to scan | |
89 | * @mm_slot: the current mm_slot we are scanning | |
90 | * @address: the next address inside that mm_slot to be scanned | |
91 | * | |
92 | * There is only the one khugepaged_scan instance of this cursor structure. | |
93 | */ | |
94 | struct khugepaged_scan { | |
95 | struct list_head mm_head; | |
96 | struct mm_slot *mm_slot; | |
97 | unsigned long address; | |
98 | }; | |
99 | ||
100 | static struct khugepaged_scan khugepaged_scan = { | |
101 | .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head), | |
102 | }; | |
103 | ||
104 | static ssize_t scan_sleep_millisecs_show(struct kobject *kobj, | |
105 | struct kobj_attribute *attr, | |
106 | char *buf) | |
107 | { | |
108 | return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs); | |
109 | } | |
110 | ||
111 | static ssize_t scan_sleep_millisecs_store(struct kobject *kobj, | |
112 | struct kobj_attribute *attr, | |
113 | const char *buf, size_t count) | |
114 | { | |
115 | unsigned long msecs; | |
116 | int err; | |
117 | ||
118 | err = kstrtoul(buf, 10, &msecs); | |
119 | if (err || msecs > UINT_MAX) | |
120 | return -EINVAL; | |
121 | ||
122 | khugepaged_scan_sleep_millisecs = msecs; | |
123 | khugepaged_sleep_expire = 0; | |
124 | wake_up_interruptible(&khugepaged_wait); | |
125 | ||
126 | return count; | |
127 | } | |
128 | static struct kobj_attribute scan_sleep_millisecs_attr = | |
129 | __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show, | |
130 | scan_sleep_millisecs_store); | |
131 | ||
132 | static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj, | |
133 | struct kobj_attribute *attr, | |
134 | char *buf) | |
135 | { | |
136 | return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs); | |
137 | } | |
138 | ||
139 | static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj, | |
140 | struct kobj_attribute *attr, | |
141 | const char *buf, size_t count) | |
142 | { | |
143 | unsigned long msecs; | |
144 | int err; | |
145 | ||
146 | err = kstrtoul(buf, 10, &msecs); | |
147 | if (err || msecs > UINT_MAX) | |
148 | return -EINVAL; | |
149 | ||
150 | khugepaged_alloc_sleep_millisecs = msecs; | |
151 | khugepaged_sleep_expire = 0; | |
152 | wake_up_interruptible(&khugepaged_wait); | |
153 | ||
154 | return count; | |
155 | } | |
156 | static struct kobj_attribute alloc_sleep_millisecs_attr = | |
157 | __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show, | |
158 | alloc_sleep_millisecs_store); | |
159 | ||
160 | static ssize_t pages_to_scan_show(struct kobject *kobj, | |
161 | struct kobj_attribute *attr, | |
162 | char *buf) | |
163 | { | |
164 | return sprintf(buf, "%u\n", khugepaged_pages_to_scan); | |
165 | } | |
166 | static ssize_t pages_to_scan_store(struct kobject *kobj, | |
167 | struct kobj_attribute *attr, | |
168 | const char *buf, size_t count) | |
169 | { | |
170 | int err; | |
171 | unsigned long pages; | |
172 | ||
173 | err = kstrtoul(buf, 10, &pages); | |
174 | if (err || !pages || pages > UINT_MAX) | |
175 | return -EINVAL; | |
176 | ||
177 | khugepaged_pages_to_scan = pages; | |
178 | ||
179 | return count; | |
180 | } | |
181 | static struct kobj_attribute pages_to_scan_attr = | |
182 | __ATTR(pages_to_scan, 0644, pages_to_scan_show, | |
183 | pages_to_scan_store); | |
184 | ||
185 | static ssize_t pages_collapsed_show(struct kobject *kobj, | |
186 | struct kobj_attribute *attr, | |
187 | char *buf) | |
188 | { | |
189 | return sprintf(buf, "%u\n", khugepaged_pages_collapsed); | |
190 | } | |
191 | static struct kobj_attribute pages_collapsed_attr = | |
192 | __ATTR_RO(pages_collapsed); | |
193 | ||
194 | static ssize_t full_scans_show(struct kobject *kobj, | |
195 | struct kobj_attribute *attr, | |
196 | char *buf) | |
197 | { | |
198 | return sprintf(buf, "%u\n", khugepaged_full_scans); | |
199 | } | |
200 | static struct kobj_attribute full_scans_attr = | |
201 | __ATTR_RO(full_scans); | |
202 | ||
203 | static ssize_t khugepaged_defrag_show(struct kobject *kobj, | |
204 | struct kobj_attribute *attr, char *buf) | |
205 | { | |
206 | return single_hugepage_flag_show(kobj, attr, buf, | |
207 | TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG); | |
208 | } | |
209 | static ssize_t khugepaged_defrag_store(struct kobject *kobj, | |
210 | struct kobj_attribute *attr, | |
211 | const char *buf, size_t count) | |
212 | { | |
213 | return single_hugepage_flag_store(kobj, attr, buf, count, | |
214 | TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG); | |
215 | } | |
216 | static struct kobj_attribute khugepaged_defrag_attr = | |
217 | __ATTR(defrag, 0644, khugepaged_defrag_show, | |
218 | khugepaged_defrag_store); | |
219 | ||
220 | /* | |
221 | * max_ptes_none controls whether khugepaged should collapse hugepages | |
222 | * over any unmapped ptes, in turn potentially increasing the memory | |
223 | * footprint of the vmas. When max_ptes_none is 0, khugepaged will not | |
224 | * reduce the available free memory in the system as it | |
225 | * runs. Increasing max_ptes_none will instead potentially reduce the | |
226 | * free memory in the system during the khugepaged scan. | |
227 | */ | |
228 | static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj, | |
229 | struct kobj_attribute *attr, | |
230 | char *buf) | |
231 | { | |
232 | return sprintf(buf, "%u\n", khugepaged_max_ptes_none); | |
233 | } | |
234 | static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj, | |
235 | struct kobj_attribute *attr, | |
236 | const char *buf, size_t count) | |
237 | { | |
238 | int err; | |
239 | unsigned long max_ptes_none; | |
240 | ||
241 | err = kstrtoul(buf, 10, &max_ptes_none); | |
242 | if (err || max_ptes_none > HPAGE_PMD_NR-1) | |
243 | return -EINVAL; | |
244 | ||
245 | khugepaged_max_ptes_none = max_ptes_none; | |
246 | ||
247 | return count; | |
248 | } | |
249 | static struct kobj_attribute khugepaged_max_ptes_none_attr = | |
250 | __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show, | |
251 | khugepaged_max_ptes_none_store); | |
252 | ||
253 | static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj, | |
254 | struct kobj_attribute *attr, | |
255 | char *buf) | |
256 | { | |
257 | return sprintf(buf, "%u\n", khugepaged_max_ptes_swap); | |
258 | } | |
259 | ||
260 | static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj, | |
261 | struct kobj_attribute *attr, | |
262 | const char *buf, size_t count) | |
263 | { | |
264 | int err; | |
265 | unsigned long max_ptes_swap; | |
266 | ||
267 | err = kstrtoul(buf, 10, &max_ptes_swap); | |
268 | if (err || max_ptes_swap > HPAGE_PMD_NR-1) | |
269 | return -EINVAL; | |
270 | ||
271 | khugepaged_max_ptes_swap = max_ptes_swap; | |
272 | ||
273 | return count; | |
274 | } | |
275 | ||
276 | static struct kobj_attribute khugepaged_max_ptes_swap_attr = | |
277 | __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show, | |
278 | khugepaged_max_ptes_swap_store); | |
279 | ||
280 | static struct attribute *khugepaged_attr[] = { | |
281 | &khugepaged_defrag_attr.attr, | |
282 | &khugepaged_max_ptes_none_attr.attr, | |
283 | &pages_to_scan_attr.attr, | |
284 | &pages_collapsed_attr.attr, | |
285 | &full_scans_attr.attr, | |
286 | &scan_sleep_millisecs_attr.attr, | |
287 | &alloc_sleep_millisecs_attr.attr, | |
288 | &khugepaged_max_ptes_swap_attr.attr, | |
289 | NULL, | |
290 | }; | |
291 | ||
292 | struct attribute_group khugepaged_attr_group = { | |
293 | .attrs = khugepaged_attr, | |
294 | .name = "khugepaged", | |
295 | }; | |
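| /* This group is registered under the transparent_hugepage sysfs directory, | |
| * so the files above appear in /sys/kernel/mm/transparent_hugepage/khugepaged/. */ | |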
296 | ||
297 | #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE) | |
298 | ||
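| /* Handle MADV_HUGEPAGE / MADV_NOHUGEPAGE: adjust the vma flags and, for | |
| * MADV_HUGEPAGE, register the mm with khugepaged right away instead of | |
| * waiting for a page fault. */ | |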
299 | int hugepage_madvise(struct vm_area_struct *vma, | |
300 | unsigned long *vm_flags, int advice) | |
301 | { | |
302 | switch (advice) { | |
303 | case MADV_HUGEPAGE: | |
304 | #ifdef CONFIG_S390 | |
305 | /* | |
306 | * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390 | |
307 | * can't handle this properly after s390_enable_sie, so we simply | |
308 | * ignore the madvise to prevent qemu from causing a SIGSEGV. | |
309 | */ | |
310 | if (mm_has_pgste(vma->vm_mm)) | |
311 | return 0; | |
312 | #endif | |
313 | *vm_flags &= ~VM_NOHUGEPAGE; | |
314 | *vm_flags |= VM_HUGEPAGE; | |
315 | /* | |
316 | * If the vma becomes suitable for khugepaged to scan, | |
317 | * register it here without waiting for a page fault that | |
318 | * may not happen any time soon. | |
319 | */ | |
320 | if (!(*vm_flags & VM_NO_KHUGEPAGED) && | |
321 | khugepaged_enter_vma_merge(vma, *vm_flags)) | |
322 | return -ENOMEM; | |
323 | break; | |
324 | case MADV_NOHUGEPAGE: | |
325 | *vm_flags &= ~VM_HUGEPAGE; | |
326 | *vm_flags |= VM_NOHUGEPAGE; | |
327 | /* | |
328 | * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning | |
329 | * this vma even if the mm stays registered in khugepaged (it | |
330 | * may have been registered before VM_NOHUGEPAGE was set). | |
331 | */ | |
332 | break; | |
333 | } | |
334 | ||
335 | return 0; | |
336 | } | |
337 | ||
338 | int __init khugepaged_init(void) | |
339 | { | |
340 | mm_slot_cache = kmem_cache_create("khugepaged_mm_slot", | |
341 | sizeof(struct mm_slot), | |
342 | __alignof__(struct mm_slot), 0, NULL); | |
343 | if (!mm_slot_cache) | |
344 | return -ENOMEM; | |
345 | ||
346 | khugepaged_pages_to_scan = HPAGE_PMD_NR * 8; | |
347 | khugepaged_max_ptes_none = HPAGE_PMD_NR - 1; | |
348 | khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8; | |
349 | ||
350 | return 0; | |
351 | } | |
352 | ||
353 | void __init khugepaged_destroy(void) | |
354 | { | |
355 | kmem_cache_destroy(mm_slot_cache); | |
356 | } | |
357 | ||
358 | static inline struct mm_slot *alloc_mm_slot(void) | |
359 | { | |
360 | if (!mm_slot_cache) /* initialization failed */ | |
361 | return NULL; | |
362 | return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL); | |
363 | } | |
364 | ||
365 | static inline void free_mm_slot(struct mm_slot *mm_slot) | |
366 | { | |
367 | kmem_cache_free(mm_slot_cache, mm_slot); | |
368 | } | |
369 | ||
370 | static struct mm_slot *get_mm_slot(struct mm_struct *mm) | |
371 | { | |
372 | struct mm_slot *mm_slot; | |
373 | ||
374 | hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm) | |
375 | if (mm == mm_slot->mm) | |
376 | return mm_slot; | |
377 | ||
378 | return NULL; | |
379 | } | |
380 | ||
381 | static void insert_to_mm_slots_hash(struct mm_struct *mm, | |
382 | struct mm_slot *mm_slot) | |
383 | { | |
384 | mm_slot->mm = mm; | |
385 | hash_add(mm_slots_hash, &mm_slot->hash, (long)mm); | |
386 | } | |
387 | ||
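| /* True once mm_users has dropped to zero, i.e. the address space is being torn down. */ | |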
388 | static inline int khugepaged_test_exit(struct mm_struct *mm) | |
389 | { | |
390 | return atomic_read(&mm->mm_users) == 0; | |
391 | } | |
392 | ||
393 | int __khugepaged_enter(struct mm_struct *mm) | |
394 | { | |
395 | struct mm_slot *mm_slot; | |
396 | int wakeup; | |
397 | ||
398 | mm_slot = alloc_mm_slot(); | |
399 | if (!mm_slot) | |
400 | return -ENOMEM; | |
401 | ||
402 | /* __khugepaged_exit() must not run from under us */ | |
403 | VM_BUG_ON_MM(khugepaged_test_exit(mm), mm); | |
404 | if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { | |
405 | free_mm_slot(mm_slot); | |
406 | return 0; | |
407 | } | |
408 | ||
409 | spin_lock(&khugepaged_mm_lock); | |
410 | insert_to_mm_slots_hash(mm, mm_slot); | |
411 | /* | |
412 | * Insert just behind the scanning cursor, to let the area settle | |
413 | * down a little. | |
414 | */ | |
415 | wakeup = list_empty(&khugepaged_scan.mm_head); | |
416 | list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head); | |
417 | spin_unlock(&khugepaged_mm_lock); | |
418 | ||
419 | atomic_inc(&mm->mm_count); | |
420 | if (wakeup) | |
421 | wake_up_interruptible(&khugepaged_wait); | |
422 | ||
423 | return 0; | |
424 | } | |
425 | ||
426 | int khugepaged_enter_vma_merge(struct vm_area_struct *vma, | |
427 | unsigned long vm_flags) | |
428 | { | |
429 | unsigned long hstart, hend; | |
430 | if (!vma->anon_vma) | |
431 | /* | |
432 | * Not yet faulted in so we will register later in the | |
433 | * page fault if needed. | |
434 | */ | |
435 | return 0; | |
436 | if (vma->vm_ops || (vm_flags & VM_NO_KHUGEPAGED)) | |
437 | /* khugepaged not yet working on file or special mappings */ | |
438 | return 0; | |
439 | hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; | |
440 | hend = vma->vm_end & HPAGE_PMD_MASK; | |
441 | if (hstart < hend) | |
442 | return khugepaged_enter(vma, vm_flags); | |
443 | return 0; | |
444 | } | |
445 | ||
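| /* Called when the mm goes away: unhash and free its mm_slot unless khugepaged | |
| * is currently scanning it, in which case wait for the scan to finish by | |
| * taking mmap_sem for write. */ | |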
446 | void __khugepaged_exit(struct mm_struct *mm) | |
447 | { | |
448 | struct mm_slot *mm_slot; | |
449 | int free = 0; | |
450 | ||
451 | spin_lock(&khugepaged_mm_lock); | |
452 | mm_slot = get_mm_slot(mm); | |
453 | if (mm_slot && khugepaged_scan.mm_slot != mm_slot) { | |
454 | hash_del(&mm_slot->hash); | |
455 | list_del(&mm_slot->mm_node); | |
456 | free = 1; | |
457 | } | |
458 | spin_unlock(&khugepaged_mm_lock); | |
459 | ||
460 | if (free) { | |
461 | clear_bit(MMF_VM_HUGEPAGE, &mm->flags); | |
462 | free_mm_slot(mm_slot); | |
463 | mmdrop(mm); | |
464 | } else if (mm_slot) { | |
465 | /* | |
466 | * This is required to serialize against | |
467 | * khugepaged_test_exit() (which is guaranteed to run | |
468 | * under mmap sem read mode). Stop here (after we | |
469 | * return all pagetables will be destroyed) until | |
470 | * khugepaged has finished working on the pagetables | |
471 | * under the mmap_sem. | |
472 | */ | |
473 | down_write(&mm->mmap_sem); | |
474 | up_write(&mm->mmap_sem); | |
475 | } | |
476 | } | |
477 | ||
478 | static void release_pte_page(struct page *page) | |
479 | { | |
480 | /* 0 stands for page_is_file_cache(page) == false */ | |
481 | dec_zone_page_state(page, NR_ISOLATED_ANON + 0); | |
482 | unlock_page(page); | |
483 | putback_lru_page(page); | |
484 | } | |
485 | ||
486 | static void release_pte_pages(pte_t *pte, pte_t *_pte) | |
487 | { | |
488 | while (--_pte >= pte) { | |
489 | pte_t pteval = *_pte; | |
490 | if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval))) | |
491 | release_pte_page(pte_page(pteval)); | |
492 | } | |
493 | } | |
494 | ||
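| /* Under the pte lock, lock each mapped page and isolate it from the LRU. | |
| * Returns 1 when all HPAGE_PMD_NR ptes qualify (none/zero ptes within limits, | |
| * at least one writable and one referenced pte); otherwise puts the pages | |
| * back and returns 0. */ | |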
495 | static int __collapse_huge_page_isolate(struct vm_area_struct *vma, | |
496 | unsigned long address, | |
497 | pte_t *pte) | |
498 | { | |
499 | struct page *page = NULL; | |
500 | pte_t *_pte; | |
501 | int none_or_zero = 0, result = 0; | |
502 | bool referenced = false, writable = false; | |
503 | ||
504 | for (_pte = pte; _pte < pte+HPAGE_PMD_NR; | |
505 | _pte++, address += PAGE_SIZE) { | |
506 | pte_t pteval = *_pte; | |
507 | if (pte_none(pteval) || (pte_present(pteval) && | |
508 | is_zero_pfn(pte_pfn(pteval)))) { | |
509 | if (!userfaultfd_armed(vma) && | |
510 | ++none_or_zero <= khugepaged_max_ptes_none) { | |
511 | continue; | |
512 | } else { | |
513 | result = SCAN_EXCEED_NONE_PTE; | |
514 | goto out; | |
515 | } | |
516 | } | |
517 | if (!pte_present(pteval)) { | |
518 | result = SCAN_PTE_NON_PRESENT; | |
519 | goto out; | |
520 | } | |
521 | page = vm_normal_page(vma, address, pteval); | |
522 | if (unlikely(!page)) { | |
523 | result = SCAN_PAGE_NULL; | |
524 | goto out; | |
525 | } | |
526 | ||
527 | VM_BUG_ON_PAGE(PageCompound(page), page); | |
528 | VM_BUG_ON_PAGE(!PageAnon(page), page); | |
529 | VM_BUG_ON_PAGE(!PageSwapBacked(page), page); | |
530 | ||
531 | /* | |
532 | * We can do it before isolate_lru_page because the | |
533 | * page can't be freed from under us. NOTE: PG_lock | |
534 | * is needed to serialize against split_huge_page | |
535 | * when invoked from the VM. | |
536 | */ | |
537 | if (!trylock_page(page)) { | |
538 | result = SCAN_PAGE_LOCK; | |
539 | goto out; | |
540 | } | |
541 | ||
542 | /* | |
543 | * cannot use mapcount: can't collapse if there's a gup pin. | |
544 | * The page must only be referenced by the scanned process | |
545 | * and page swap cache. | |
546 | */ | |
547 | if (page_count(page) != 1 + !!PageSwapCache(page)) { | |
548 | unlock_page(page); | |
549 | result = SCAN_PAGE_COUNT; | |
550 | goto out; | |
551 | } | |
552 | if (pte_write(pteval)) { | |
553 | writable = true; | |
554 | } else { | |
555 | if (PageSwapCache(page) && | |
556 | !reuse_swap_page(page, NULL)) { | |
557 | unlock_page(page); | |
558 | result = SCAN_SWAP_CACHE_PAGE; | |
559 | goto out; | |
560 | } | |
561 | /* | |
562 | * Page is not in the swap cache. It can be collapsed | |
563 | * into a THP. | |
564 | */ | |
565 | } | |
566 | ||
567 | /* | |
568 | * Isolate the page to avoid collapsing a hugepage | |
569 | * currently in use by the VM. | |
570 | */ | |
571 | if (isolate_lru_page(page)) { | |
572 | unlock_page(page); | |
573 | result = SCAN_DEL_PAGE_LRU; | |
574 | goto out; | |
575 | } | |
576 | /* 0 stands for page_is_file_cache(page) == false */ | |
577 | inc_zone_page_state(page, NR_ISOLATED_ANON + 0); | |
578 | VM_BUG_ON_PAGE(!PageLocked(page), page); | |
579 | VM_BUG_ON_PAGE(PageLRU(page), page); | |
580 | ||
581 | /* If no mapped pte is young, don't collapse the page */ | |
582 | if (pte_young(pteval) || | |
583 | page_is_young(page) || PageReferenced(page) || | |
584 | mmu_notifier_test_young(vma->vm_mm, address)) | |
585 | referenced = true; | |
586 | } | |
587 | if (likely(writable)) { | |
588 | if (likely(referenced)) { | |
589 | result = SCAN_SUCCEED; | |
590 | trace_mm_collapse_huge_page_isolate(page, none_or_zero, | |
591 | referenced, writable, result); | |
592 | return 1; | |
593 | } | |
594 | } else { | |
595 | result = SCAN_PAGE_RO; | |
596 | } | |
597 | ||
598 | out: | |
599 | release_pte_pages(pte, _pte); | |
600 | trace_mm_collapse_huge_page_isolate(page, none_or_zero, | |
601 | referenced, writable, result); | |
602 | return 0; | |
603 | } | |
604 | ||
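| /* Copy each isolated small page into the new huge page (or clear the | |
| * destination for none/zero ptes), clear the old ptes, drop the old rmap | |
| * entries and free the old pages. */ | |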
605 | static void __collapse_huge_page_copy(pte_t *pte, struct page *page, | |
606 | struct vm_area_struct *vma, | |
607 | unsigned long address, | |
608 | spinlock_t *ptl) | |
609 | { | |
610 | pte_t *_pte; | |
611 | for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) { | |
612 | pte_t pteval = *_pte; | |
613 | struct page *src_page; | |
614 | ||
615 | if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { | |
616 | clear_user_highpage(page, address); | |
617 | add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); | |
618 | if (is_zero_pfn(pte_pfn(pteval))) { | |
619 | /* | |
620 | * ptl mostly unnecessary. | |
621 | */ | |
622 | spin_lock(ptl); | |
623 | /* | |
624 | * paravirt calls inside pte_clear here are | |
625 | * superfluous. | |
626 | */ | |
627 | pte_clear(vma->vm_mm, address, _pte); | |
628 | spin_unlock(ptl); | |
629 | } | |
630 | } else { | |
631 | src_page = pte_page(pteval); | |
632 | copy_user_highpage(page, src_page, address, vma); | |
633 | VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page); | |
634 | release_pte_page(src_page); | |
635 | /* | |
636 | * ptl mostly unnecessary, but preempt has to | |
637 | * be disabled to update the per-cpu stats | |
638 | * inside page_remove_rmap(). | |
639 | */ | |
640 | spin_lock(ptl); | |
641 | /* | |
642 | * paravirt calls inside pte_clear here are | |
643 | * superfluous. | |
644 | */ | |
645 | pte_clear(vma->vm_mm, address, _pte); | |
646 | page_remove_rmap(src_page, false); | |
647 | spin_unlock(ptl); | |
648 | free_page_and_swap_cache(src_page); | |
649 | } | |
650 | ||
651 | address += PAGE_SIZE; | |
652 | page++; | |
653 | } | |
654 | } | |
655 | ||
656 | static void khugepaged_alloc_sleep(void) | |
657 | { | |
658 | DEFINE_WAIT(wait); | |
659 | ||
660 | add_wait_queue(&khugepaged_wait, &wait); | |
661 | freezable_schedule_timeout_interruptible( | |
662 | msecs_to_jiffies(khugepaged_alloc_sleep_millisecs)); | |
663 | remove_wait_queue(&khugepaged_wait, &wait); | |
664 | } | |
665 | ||
666 | static int khugepaged_node_load[MAX_NUMNODES]; | |
667 | ||
668 | static bool khugepaged_scan_abort(int nid) | |
669 | { | |
670 | int i; | |
671 | ||
672 | /* | |
673 | * If zone_reclaim_mode is disabled, then no extra effort is made to | |
674 | * allocate memory locally. | |
675 | */ | |
676 | if (!zone_reclaim_mode) | |
677 | return false; | |
678 | ||
679 | /* If there is a count for this node already, it must be acceptable */ | |
680 | if (khugepaged_node_load[nid]) | |
681 | return false; | |
682 | ||
683 | for (i = 0; i < MAX_NUMNODES; i++) { | |
684 | if (!khugepaged_node_load[i]) | |
685 | continue; | |
686 | if (node_distance(nid, i) > RECLAIM_DISTANCE) | |
687 | return true; | |
688 | } | |
689 | return false; | |
690 | } | |
691 | ||
692 | /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */ | |
693 | static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void) | |
694 | { | |
695 | return GFP_TRANSHUGE | (khugepaged_defrag() ? __GFP_DIRECT_RECLAIM : 0); | |
696 | } | |
697 | ||
698 | #ifdef CONFIG_NUMA | |
699 | static int khugepaged_find_target_node(void) | |
700 | { | |
701 | static int last_khugepaged_target_node = NUMA_NO_NODE; | |
702 | int nid, target_node = 0, max_value = 0; | |
703 | ||
704 | /* find first node with max normal pages hit */ | |
705 | for (nid = 0; nid < MAX_NUMNODES; nid++) | |
706 | if (khugepaged_node_load[nid] > max_value) { | |
707 | max_value = khugepaged_node_load[nid]; | |
708 | target_node = nid; | |
709 | } | |
710 | ||
711 | /* do some balancing if several nodes have the same hit count */ | |
712 | if (target_node <= last_khugepaged_target_node) | |
713 | for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES; | |
714 | nid++) | |
715 | if (max_value == khugepaged_node_load[nid]) { | |
716 | target_node = nid; | |
717 | break; | |
718 | } | |
719 | ||
720 | last_khugepaged_target_node = target_node; | |
721 | return target_node; | |
722 | } | |
723 | ||
724 | static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) | |
725 | { | |
726 | if (IS_ERR(*hpage)) { | |
727 | if (!*wait) | |
728 | return false; | |
729 | ||
730 | *wait = false; | |
731 | *hpage = NULL; | |
732 | khugepaged_alloc_sleep(); | |
733 | } else if (*hpage) { | |
734 | put_page(*hpage); | |
735 | *hpage = NULL; | |
736 | } | |
737 | ||
738 | return true; | |
739 | } | |
740 | ||
741 | static struct page * | |
742 | khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm, | |
743 | unsigned long address, int node) | |
744 | { | |
745 | VM_BUG_ON_PAGE(*hpage, *hpage); | |
746 | ||
747 | /* | |
748 | * Before allocating the hugepage, release the mmap_sem read lock. | |
749 | * The allocation can take potentially a long time if it involves | |
750 | * sync compaction, and we do not need to hold the mmap_sem during | |
751 | * that. We will recheck the vma after taking it again in write mode. | |
752 | */ | |
753 | up_read(&mm->mmap_sem); | |
754 | ||
755 | *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER); | |
756 | if (unlikely(!*hpage)) { | |
757 | count_vm_event(THP_COLLAPSE_ALLOC_FAILED); | |
758 | *hpage = ERR_PTR(-ENOMEM); | |
759 | return NULL; | |
760 | } | |
761 | ||
762 | prep_transhuge_page(*hpage); | |
763 | count_vm_event(THP_COLLAPSE_ALLOC); | |
764 | return *hpage; | |
765 | } | |
766 | #else | |
767 | static int khugepaged_find_target_node(void) | |
768 | { | |
769 | return 0; | |
770 | } | |
771 | ||
772 | static inline struct page *alloc_khugepaged_hugepage(void) | |
773 | { | |
774 | struct page *page; | |
775 | ||
776 | page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(), | |
777 | HPAGE_PMD_ORDER); | |
778 | if (page) | |
779 | prep_transhuge_page(page); | |
780 | return page; | |
781 | } | |
782 | ||
783 | static struct page *khugepaged_alloc_hugepage(bool *wait) | |
784 | { | |
785 | struct page *hpage; | |
786 | ||
787 | do { | |
788 | hpage = alloc_khugepaged_hugepage(); | |
789 | if (!hpage) { | |
790 | count_vm_event(THP_COLLAPSE_ALLOC_FAILED); | |
791 | if (!*wait) | |
792 | return NULL; | |
793 | ||
794 | *wait = false; | |
795 | khugepaged_alloc_sleep(); | |
796 | } else | |
797 | count_vm_event(THP_COLLAPSE_ALLOC); | |
798 | } while (unlikely(!hpage) && likely(khugepaged_enabled())); | |
799 | ||
800 | return hpage; | |
801 | } | |
802 | ||
803 | static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) | |
804 | { | |
805 | if (!*hpage) | |
806 | *hpage = khugepaged_alloc_hugepage(wait); | |
807 | ||
808 | if (unlikely(!*hpage)) | |
809 | return false; | |
810 | ||
811 | return true; | |
812 | } | |
813 | ||
814 | static struct page * | |
815 | khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm, | |
816 | unsigned long address, int node) | |
817 | { | |
818 | up_read(&mm->mmap_sem); | |
819 | VM_BUG_ON(!*hpage); | |
820 | ||
821 | return *hpage; | |
822 | } | |
823 | #endif | |
824 | ||
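| /* Whether a vma is eligible for khugepaged: hugepages allowed, anonymous | |
| * (no vm_ops), not a temporary stack and not VM_NO_KHUGEPAGED. */ | |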
825 | static bool hugepage_vma_check(struct vm_area_struct *vma) | |
826 | { | |
827 | if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) || | |
828 | (vma->vm_flags & VM_NOHUGEPAGE)) | |
829 | return false; | |
830 | if (!vma->anon_vma || vma->vm_ops) | |
831 | return false; | |
832 | if (is_vma_temporary_stack(vma)) | |
833 | return false; | |
834 | return !(vma->vm_flags & VM_NO_KHUGEPAGED); | |
835 | } | |
836 | ||
837 | /* | |
838 | * If mmap_sem was temporarily dropped, revalidate the vma before | |
839 | * continuing (the caller must hold mmap_sem again). | |
840 | * Returns 0 on success, otherwise a non-zero scan_result | |
841 | * code. | |
842 | */ | |
843 | ||
844 | static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address) | |
845 | { | |
846 | struct vm_area_struct *vma; | |
847 | unsigned long hstart, hend; | |
848 | ||
849 | if (unlikely(khugepaged_test_exit(mm))) | |
850 | return SCAN_ANY_PROCESS; | |
851 | ||
852 | vma = find_vma(mm, address); | |
853 | if (!vma) | |
854 | return SCAN_VMA_NULL; | |
855 | ||
856 | hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; | |
857 | hend = vma->vm_end & HPAGE_PMD_MASK; | |
858 | if (address < hstart || address + HPAGE_PMD_SIZE > hend) | |
859 | return SCAN_ADDRESS_RANGE; | |
860 | if (!hugepage_vma_check(vma)) | |
861 | return SCAN_VMA_CHECK; | |
862 | return 0; | |
863 | } | |
864 | ||
865 | /* | |
866 | * Bring missing pages in from swap, to complete THP collapse. | |
867 | * Only done if khugepaged_scan_pmd believes it is worthwhile. | |
868 | * | |
869 | * Called and returns without pte mapped or spinlocks held, | |
870 | * but with mmap_sem held to protect against vma changes. | |
871 | */ | |
872 | ||
873 | static bool __collapse_huge_page_swapin(struct mm_struct *mm, | |
874 | struct vm_area_struct *vma, | |
875 | unsigned long address, pmd_t *pmd) | |
876 | { | |
877 | pte_t pteval; | |
878 | int swapped_in = 0, ret = 0; | |
879 | struct fault_env fe = { | |
880 | .vma = vma, | |
881 | .address = address, | |
882 | .flags = FAULT_FLAG_ALLOW_RETRY, | |
883 | .pmd = pmd, | |
884 | }; | |
885 | ||
886 | fe.pte = pte_offset_map(pmd, address); | |
887 | for (; fe.address < address + HPAGE_PMD_NR*PAGE_SIZE; | |
888 | fe.pte++, fe.address += PAGE_SIZE) { | |
889 | pteval = *fe.pte; | |
890 | if (!is_swap_pte(pteval)) | |
891 | continue; | |
892 | swapped_in++; | |
893 | ret = do_swap_page(&fe, pteval); | |
894 | /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */ | |
895 | if (ret & VM_FAULT_RETRY) { | |
896 | down_read(&mm->mmap_sem); | |
897 | /* vma is no longer available, don't continue to swapin */ | |
898 | if (hugepage_vma_revalidate(mm, address)) | |
899 | return false; | |
900 | /* check if the pmd is still valid */ | |
901 | if (mm_find_pmd(mm, address) != pmd) | |
902 | return false; | |
903 | } | |
904 | if (ret & VM_FAULT_ERROR) { | |
905 | trace_mm_collapse_huge_page_swapin(mm, swapped_in, 0); | |
906 | return false; | |
907 | } | |
908 | /* pte is unmapped now, we need to map it */ | |
909 | fe.pte = pte_offset_map(pmd, fe.address); | |
910 | } | |
911 | fe.pte--; | |
912 | pte_unmap(fe.pte); | |
913 | trace_mm_collapse_huge_page_swapin(mm, swapped_in, 1); | |
914 | return true; | |
915 | } | |
916 | ||
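| /* Attempt the collapse: allocate the huge page, swap in missing ptes, | |
| * re-take mmap_sem for write, clear the old pmd, copy the data and install | |
| * the huge pmd. Called with mmap_sem held for read; returns with it released. */ | |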
917 | static void collapse_huge_page(struct mm_struct *mm, | |
918 | unsigned long address, | |
919 | struct page **hpage, | |
920 | struct vm_area_struct *vma, | |
921 | int node) | |
922 | { | |
923 | pmd_t *pmd, _pmd; | |
924 | pte_t *pte; | |
925 | pgtable_t pgtable; | |
926 | struct page *new_page; | |
927 | spinlock_t *pmd_ptl, *pte_ptl; | |
928 | int isolated = 0, result = 0; | |
929 | struct mem_cgroup *memcg; | |
930 | unsigned long mmun_start; /* For mmu_notifiers */ | |
931 | unsigned long mmun_end; /* For mmu_notifiers */ | |
932 | gfp_t gfp; | |
933 | ||
934 | VM_BUG_ON(address & ~HPAGE_PMD_MASK); | |
935 | ||
936 | /* Only allocate from the target node */ | |
937 | gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_OTHER_NODE | __GFP_THISNODE; | |
938 | ||
939 | /* release the mmap_sem read lock. */ | |
940 | new_page = khugepaged_alloc_page(hpage, gfp, mm, address, node); | |
941 | if (!new_page) { | |
942 | result = SCAN_ALLOC_HUGE_PAGE_FAIL; | |
943 | goto out_nolock; | |
944 | } | |
945 | ||
946 | if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) { | |
947 | result = SCAN_CGROUP_CHARGE_FAIL; | |
948 | goto out_nolock; | |
949 | } | |
950 | ||
951 | down_read(&mm->mmap_sem); | |
952 | result = hugepage_vma_revalidate(mm, address); | |
953 | if (result) { | |
954 | mem_cgroup_cancel_charge(new_page, memcg, true); | |
955 | up_read(&mm->mmap_sem); | |
956 | goto out_nolock; | |
957 | } | |
958 | ||
959 | pmd = mm_find_pmd(mm, address); | |
960 | if (!pmd) { | |
961 | result = SCAN_PMD_NULL; | |
962 | mem_cgroup_cancel_charge(new_page, memcg, true); | |
963 | up_read(&mm->mmap_sem); | |
964 | goto out_nolock; | |
965 | } | |
966 | ||
967 | /* | |
968 | * __collapse_huge_page_swapin always returns with mmap_sem locked. | |
969 | * If it fails, release mmap_sem and jump directly out. | |
970 | * Continuing to collapse causes inconsistency. | |
971 | */ | |
972 | if (!__collapse_huge_page_swapin(mm, vma, address, pmd)) { | |
973 | mem_cgroup_cancel_charge(new_page, memcg, true); | |
974 | up_read(&mm->mmap_sem); | |
975 | goto out_nolock; | |
976 | } | |
977 | ||
978 | up_read(&mm->mmap_sem); | |
979 | /* | |
980 | * Prevent all access to pagetables with the exception of | |
981 | * gup_fast later handled by the ptep_clear_flush and the VM | |
982 | * handled by the anon_vma lock + PG_lock. | |
983 | */ | |
984 | down_write(&mm->mmap_sem); | |
985 | result = hugepage_vma_revalidate(mm, address); | |
986 | if (result) | |
987 | goto out; | |
988 | /* check if the pmd is still valid */ | |
989 | if (mm_find_pmd(mm, address) != pmd) | |
990 | goto out; | |
991 | ||
992 | anon_vma_lock_write(vma->anon_vma); | |
993 | ||
994 | pte = pte_offset_map(pmd, address); | |
995 | pte_ptl = pte_lockptr(mm, pmd); | |
996 | ||
997 | mmun_start = address; | |
998 | mmun_end = address + HPAGE_PMD_SIZE; | |
999 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); | |
1000 | pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */ | |
1001 | /* | |
1002 | * After this gup_fast can't run anymore. This also removes | |
1003 | * any huge TLB entry from the CPU so we won't allow | |
1004 | * huge and small TLB entries for the same virtual address | |
1005 | * to avoid the risk of CPU bugs in that area. | |
1006 | */ | |
1007 | _pmd = pmdp_collapse_flush(vma, address, pmd); | |
1008 | spin_unlock(pmd_ptl); | |
1009 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); | |
1010 | ||
1011 | spin_lock(pte_ptl); | |
1012 | isolated = __collapse_huge_page_isolate(vma, address, pte); | |
1013 | spin_unlock(pte_ptl); | |
1014 | ||
1015 | if (unlikely(!isolated)) { | |
1016 | pte_unmap(pte); | |
1017 | spin_lock(pmd_ptl); | |
1018 | BUG_ON(!pmd_none(*pmd)); | |
1019 | /* | |
1020 | * We can only use set_pmd_at when establishing | |
1021 | * hugepmds, and never for establishing regular pmds that | |
1022 | * point to regular pagetables. Use pmd_populate for that. | |
1023 | */ | |
1024 | pmd_populate(mm, pmd, pmd_pgtable(_pmd)); | |
1025 | spin_unlock(pmd_ptl); | |
1026 | anon_vma_unlock_write(vma->anon_vma); | |
1027 | result = SCAN_FAIL; | |
1028 | goto out; | |
1029 | } | |
1030 | ||
1031 | /* | |
1032 | * All pages are isolated and locked so anon_vma rmap | |
1033 | * can't run anymore. | |
1034 | */ | |
1035 | anon_vma_unlock_write(vma->anon_vma); | |
1036 | ||
1037 | __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl); | |
1038 | pte_unmap(pte); | |
1039 | __SetPageUptodate(new_page); | |
1040 | pgtable = pmd_pgtable(_pmd); | |
1041 | ||
1042 | _pmd = mk_huge_pmd(new_page, vma->vm_page_prot); | |
1043 | _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); | |
1044 | ||
1045 | /* | |
1046 | * spin_lock() below is not the equivalent of smp_wmb(), so | |
1047 | * this is needed to prevent the copy_huge_page writes from becoming | |
1048 | * visible after the set_pmd_at() write. | |
1049 | */ | |
1050 | smp_wmb(); | |
1051 | ||
1052 | spin_lock(pmd_ptl); | |
1053 | BUG_ON(!pmd_none(*pmd)); | |
1054 | page_add_new_anon_rmap(new_page, vma, address, true); | |
1055 | mem_cgroup_commit_charge(new_page, memcg, false, true); | |
1056 | lru_cache_add_active_or_unevictable(new_page, vma); | |
1057 | pgtable_trans_huge_deposit(mm, pmd, pgtable); | |
1058 | set_pmd_at(mm, address, pmd, _pmd); | |
1059 | update_mmu_cache_pmd(vma, address, pmd); | |
1060 | spin_unlock(pmd_ptl); | |
1061 | ||
1062 | *hpage = NULL; | |
1063 | ||
1064 | khugepaged_pages_collapsed++; | |
1065 | result = SCAN_SUCCEED; | |
1066 | out_up_write: | |
1067 | up_write(&mm->mmap_sem); | |
1068 | out_nolock: | |
1069 | trace_mm_collapse_huge_page(mm, isolated, result); | |
1070 | return; | |
1071 | out: | |
1072 | mem_cgroup_cancel_charge(new_page, memcg, true); | |
1073 | goto out_up_write; | |
1074 | } | |
1075 | ||
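| /* Scan one pmd-sized range and decide whether a collapse is worthwhile; | |
| * returns 1 after calling collapse_huge_page (which releases mmap_sem). */ | |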
1076 | static int khugepaged_scan_pmd(struct mm_struct *mm, | |
1077 | struct vm_area_struct *vma, | |
1078 | unsigned long address, | |
1079 | struct page **hpage) | |
1080 | { | |
1081 | pmd_t *pmd; | |
1082 | pte_t *pte, *_pte; | |
1083 | int ret = 0, none_or_zero = 0, result = 0; | |
1084 | struct page *page = NULL; | |
1085 | unsigned long _address; | |
1086 | spinlock_t *ptl; | |
1087 | int node = NUMA_NO_NODE, unmapped = 0; | |
1088 | bool writable = false, referenced = false; | |
1089 | ||
1090 | VM_BUG_ON(address & ~HPAGE_PMD_MASK); | |
1091 | ||
1092 | pmd = mm_find_pmd(mm, address); | |
1093 | if (!pmd) { | |
1094 | result = SCAN_PMD_NULL; | |
1095 | goto out; | |
1096 | } | |
1097 | ||
1098 | memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load)); | |
1099 | pte = pte_offset_map_lock(mm, pmd, address, &ptl); | |
1100 | for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR; | |
1101 | _pte++, _address += PAGE_SIZE) { | |
1102 | pte_t pteval = *_pte; | |
1103 | if (is_swap_pte(pteval)) { | |
1104 | if (++unmapped <= khugepaged_max_ptes_swap) { | |
1105 | continue; | |
1106 | } else { | |
1107 | result = SCAN_EXCEED_SWAP_PTE; | |
1108 | goto out_unmap; | |
1109 | } | |
1110 | } | |
1111 | if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { | |
1112 | if (!userfaultfd_armed(vma) && | |
1113 | ++none_or_zero <= khugepaged_max_ptes_none) { | |
1114 | continue; | |
1115 | } else { | |
1116 | result = SCAN_EXCEED_NONE_PTE; | |
1117 | goto out_unmap; | |
1118 | } | |
1119 | } | |
1120 | if (!pte_present(pteval)) { | |
1121 | result = SCAN_PTE_NON_PRESENT; | |
1122 | goto out_unmap; | |
1123 | } | |
1124 | if (pte_write(pteval)) | |
1125 | writable = true; | |
1126 | ||
1127 | page = vm_normal_page(vma, _address, pteval); | |
1128 | if (unlikely(!page)) { | |
1129 | result = SCAN_PAGE_NULL; | |
1130 | goto out_unmap; | |
1131 | } | |
1132 | ||
1133 | /* TODO: teach khugepaged to collapse THP mapped with pte */ | |
1134 | if (PageCompound(page)) { | |
1135 | result = SCAN_PAGE_COMPOUND; | |
1136 | goto out_unmap; | |
1137 | } | |
1138 | ||
1139 | /* | |
1140 | * Record which node the original page is from and save this | |
1141 | * information to khugepaged_node_load[]. | |
1142 | * Khugepaged will allocate the hugepage from the node with the | |
1143 | * highest hit count. | |
1144 | */ | |
1145 | node = page_to_nid(page); | |
1146 | if (khugepaged_scan_abort(node)) { | |
1147 | result = SCAN_SCAN_ABORT; | |
1148 | goto out_unmap; | |
1149 | } | |
1150 | khugepaged_node_load[node]++; | |
1151 | if (!PageLRU(page)) { | |
1152 | result = SCAN_PAGE_LRU; | |
1153 | goto out_unmap; | |
1154 | } | |
1155 | if (PageLocked(page)) { | |
1156 | result = SCAN_PAGE_LOCK; | |
1157 | goto out_unmap; | |
1158 | } | |
1159 | if (!PageAnon(page)) { | |
1160 | result = SCAN_PAGE_ANON; | |
1161 | goto out_unmap; | |
1162 | } | |
1163 | ||
1164 | /* | |
1165 | * cannot use mapcount: can't collapse if there's a gup pin. | |
1166 | * The page must only be referenced by the scanned process | |
1167 | * and page swap cache. | |
1168 | */ | |
1169 | if (page_count(page) != 1 + !!PageSwapCache(page)) { | |
1170 | result = SCAN_PAGE_COUNT; | |
1171 | goto out_unmap; | |
1172 | } | |
1173 | if (pte_young(pteval) || | |
1174 | page_is_young(page) || PageReferenced(page) || | |
1175 | mmu_notifier_test_young(vma->vm_mm, address)) | |
1176 | referenced = true; | |
1177 | } | |
1178 | if (writable) { | |
1179 | if (referenced) { | |
1180 | result = SCAN_SUCCEED; | |
1181 | ret = 1; | |
1182 | } else { | |
1183 | result = SCAN_NO_REFERENCED_PAGE; | |
1184 | } | |
1185 | } else { | |
1186 | result = SCAN_PAGE_RO; | |
1187 | } | |
1188 | out_unmap: | |
1189 | pte_unmap_unlock(pte, ptl); | |
1190 | if (ret) { | |
1191 | node = khugepaged_find_target_node(); | |
1192 | /* collapse_huge_page will return with the mmap_sem released */ | |
1193 | collapse_huge_page(mm, address, hpage, vma, node); | |
1194 | } | |
1195 | out: | |
1196 | trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced, | |
1197 | none_or_zero, result, unmapped); | |
1198 | return ret; | |
1199 | } | |
1200 | ||
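| /* If the mm has exited, unhash and free its mm_slot and drop the mm | |
| * reference; called with khugepaged_mm_lock held. */ | |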
1201 | static void collect_mm_slot(struct mm_slot *mm_slot) | |
1202 | { | |
1203 | struct mm_struct *mm = mm_slot->mm; | |
1204 | ||
1205 | VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); | |
1206 | ||
1207 | if (khugepaged_test_exit(mm)) { | |
1208 | /* free mm_slot */ | |
1209 | hash_del(&mm_slot->hash); | |
1210 | list_del(&mm_slot->mm_node); | |
1211 | ||
1212 | /* | |
1213 | * Not strictly needed because the mm exited already. | |
1214 | * | |
1215 | * clear_bit(MMF_VM_HUGEPAGE, &mm->flags); | |
1216 | */ | |
1217 | ||
1218 | /* khugepaged_mm_lock actually not necessary for the below */ | |
1219 | free_mm_slot(mm_slot); | |
1220 | mmdrop(mm); | |
1221 | } | |
1222 | } | |
1223 | ||
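| /* Scan up to 'pages' ptes starting at the saved cursor, advancing it across | |
| * the vmas and mm_slots; returns the amount of progress made (in ptes). */ | |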
1224 | static unsigned int khugepaged_scan_mm_slot(unsigned int pages, | |
1225 | struct page **hpage) | |
1226 | __releases(&khugepaged_mm_lock) | |
1227 | __acquires(&khugepaged_mm_lock) | |
1228 | { | |
1229 | struct mm_slot *mm_slot; | |
1230 | struct mm_struct *mm; | |
1231 | struct vm_area_struct *vma; | |
1232 | int progress = 0; | |
1233 | ||
1234 | VM_BUG_ON(!pages); | |
1235 | VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); | |
1236 | ||
1237 | if (khugepaged_scan.mm_slot) | |
1238 | mm_slot = khugepaged_scan.mm_slot; | |
1239 | else { | |
1240 | mm_slot = list_entry(khugepaged_scan.mm_head.next, | |
1241 | struct mm_slot, mm_node); | |
1242 | khugepaged_scan.address = 0; | |
1243 | khugepaged_scan.mm_slot = mm_slot; | |
1244 | } | |
1245 | spin_unlock(&khugepaged_mm_lock); | |
1246 | ||
1247 | mm = mm_slot->mm; | |
1248 | down_read(&mm->mmap_sem); | |
1249 | if (unlikely(khugepaged_test_exit(mm))) | |
1250 | vma = NULL; | |
1251 | else | |
1252 | vma = find_vma(mm, khugepaged_scan.address); | |
1253 | ||
1254 | progress++; | |
1255 | for (; vma; vma = vma->vm_next) { | |
1256 | unsigned long hstart, hend; | |
1257 | ||
1258 | cond_resched(); | |
1259 | if (unlikely(khugepaged_test_exit(mm))) { | |
1260 | progress++; | |
1261 | break; | |
1262 | } | |
1263 | if (!hugepage_vma_check(vma)) { | |
1264 | skip: | |
1265 | progress++; | |
1266 | continue; | |
1267 | } | |
1268 | hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; | |
1269 | hend = vma->vm_end & HPAGE_PMD_MASK; | |
1270 | if (hstart >= hend) | |
1271 | goto skip; | |
1272 | if (khugepaged_scan.address > hend) | |
1273 | goto skip; | |
1274 | if (khugepaged_scan.address < hstart) | |
1275 | khugepaged_scan.address = hstart; | |
1276 | VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); | |
1277 | ||
1278 | while (khugepaged_scan.address < hend) { | |
1279 | int ret; | |
1280 | cond_resched(); | |
1281 | if (unlikely(khugepaged_test_exit(mm))) | |
1282 | goto breakouterloop; | |
1283 | ||
1284 | VM_BUG_ON(khugepaged_scan.address < hstart || | |
1285 | khugepaged_scan.address + HPAGE_PMD_SIZE > | |
1286 | hend); | |
1287 | ret = khugepaged_scan_pmd(mm, vma, | |
1288 | khugepaged_scan.address, | |
1289 | hpage); | |
1290 | /* move to next address */ | |
1291 | khugepaged_scan.address += HPAGE_PMD_SIZE; | |
1292 | progress += HPAGE_PMD_NR; | |
1293 | if (ret) | |
1294 | /* we released mmap_sem so break loop */ | |
1295 | goto breakouterloop_mmap_sem; | |
1296 | if (progress >= pages) | |
1297 | goto breakouterloop; | |
1298 | } | |
1299 | } | |
1300 | breakouterloop: | |
1301 | up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */ | |
1302 | breakouterloop_mmap_sem: | |
1303 | ||
1304 | spin_lock(&khugepaged_mm_lock); | |
1305 | VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot); | |
1306 | /* | |
1307 | * Release the current mm_slot if this mm is about to die, or | |
1308 | * if we scanned all vmas of this mm. | |
1309 | */ | |
1310 | if (khugepaged_test_exit(mm) || !vma) { | |
1311 | /* | |
1312 | * Make sure that if mm_users is reaching zero while | |
1313 | * khugepaged runs here, khugepaged_exit will find | |
1314 | * mm_slot not pointing to the exiting mm. | |
1315 | */ | |
1316 | if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) { | |
1317 | khugepaged_scan.mm_slot = list_entry( | |
1318 | mm_slot->mm_node.next, | |
1319 | struct mm_slot, mm_node); | |
1320 | khugepaged_scan.address = 0; | |
1321 | } else { | |
1322 | khugepaged_scan.mm_slot = NULL; | |
1323 | khugepaged_full_scans++; | |
1324 | } | |
1325 | ||
1326 | collect_mm_slot(mm_slot); | |
1327 | } | |
1328 | ||
1329 | return progress; | |
1330 | } | |
1331 | ||
1332 | static int khugepaged_has_work(void) | |
1333 | { | |
1334 | return !list_empty(&khugepaged_scan.mm_head) && | |
1335 | khugepaged_enabled(); | |
1336 | } | |
1337 | ||
1338 | static int khugepaged_wait_event(void) | |
1339 | { | |
1340 | return !list_empty(&khugepaged_scan.mm_head) || | |
1341 | kthread_should_stop(); | |
1342 | } | |
1343 | ||
1344 | static void khugepaged_do_scan(void) | |
1345 | { | |
1346 | struct page *hpage = NULL; | |
1347 | unsigned int progress = 0, pass_through_head = 0; | |
1348 | unsigned int pages = khugepaged_pages_to_scan; | |
1349 | bool wait = true; | |
1350 | ||
1351 | barrier(); /* write khugepaged_pages_to_scan to local stack */ | |
1352 | ||
1353 | while (progress < pages) { | |
1354 | if (!khugepaged_prealloc_page(&hpage, &wait)) | |
1355 | break; | |
1356 | ||
1357 | cond_resched(); | |
1358 | ||
1359 | if (unlikely(kthread_should_stop() || try_to_freeze())) | |
1360 | break; | |
1361 | ||
1362 | spin_lock(&khugepaged_mm_lock); | |
1363 | if (!khugepaged_scan.mm_slot) | |
1364 | pass_through_head++; | |
1365 | if (khugepaged_has_work() && | |
1366 | pass_through_head < 2) | |
1367 | progress += khugepaged_scan_mm_slot(pages - progress, | |
1368 | &hpage); | |
1369 | else | |
1370 | progress = pages; | |
1371 | spin_unlock(&khugepaged_mm_lock); | |
1372 | } | |
1373 | ||
1374 | if (!IS_ERR_OR_NULL(hpage)) | |
1375 | put_page(hpage); | |
1376 | } | |
1377 | ||
1378 | static bool khugepaged_should_wakeup(void) | |
1379 | { | |
1380 | return kthread_should_stop() || | |
1381 | time_after_eq(jiffies, khugepaged_sleep_expire); | |
1382 | } | |
1383 | ||
1384 | static void khugepaged_wait_work(void) | |
1385 | { | |
1386 | if (khugepaged_has_work()) { | |
1387 | const unsigned long scan_sleep_jiffies = | |
1388 | msecs_to_jiffies(khugepaged_scan_sleep_millisecs); | |
1389 | ||
1390 | if (!scan_sleep_jiffies) | |
1391 | return; | |
1392 | ||
1393 | khugepaged_sleep_expire = jiffies + scan_sleep_jiffies; | |
1394 | wait_event_freezable_timeout(khugepaged_wait, | |
1395 | khugepaged_should_wakeup(), | |
1396 | scan_sleep_jiffies); | |
1397 | return; | |
1398 | } | |
1399 | ||
1400 | if (khugepaged_enabled()) | |
1401 | wait_event_freezable(khugepaged_wait, khugepaged_wait_event()); | |
1402 | } | |
1403 | ||
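| /* Main loop of the khugepaged kernel thread: do a scan pass, then sleep | |
| * until there is more work or the thread is stopped. */ | |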
1404 | static int khugepaged(void *none) | |
1405 | { | |
1406 | struct mm_slot *mm_slot; | |
1407 | ||
1408 | set_freezable(); | |
1409 | set_user_nice(current, MAX_NICE); | |
1410 | ||
1411 | while (!kthread_should_stop()) { | |
1412 | khugepaged_do_scan(); | |
1413 | khugepaged_wait_work(); | |
1414 | } | |
1415 | ||
1416 | spin_lock(&khugepaged_mm_lock); | |
1417 | mm_slot = khugepaged_scan.mm_slot; | |
1418 | khugepaged_scan.mm_slot = NULL; | |
1419 | if (mm_slot) | |
1420 | collect_mm_slot(mm_slot); | |
1421 | spin_unlock(&khugepaged_mm_lock); | |
1422 | return 0; | |
1423 | } | |
1424 | ||
1425 | static void set_recommended_min_free_kbytes(void) | |
1426 | { | |
1427 | struct zone *zone; | |
1428 | int nr_zones = 0; | |
1429 | unsigned long recommended_min; | |
1430 | ||
1431 | for_each_populated_zone(zone) | |
1432 | nr_zones++; | |
1433 | ||
1434 | /* Ensure 2 pageblocks are free to assist fragmentation avoidance */ | |
1435 | recommended_min = pageblock_nr_pages * nr_zones * 2; | |
1436 | ||
1437 | /* | |
1438 | * Make sure that on average at least two pageblocks are almost free | |
1439 | * of another type, one for a migratetype to fall back to and a | |
1440 | * second to avoid subsequent fallbacks of other types. There are 3 | |
1441 | * MIGRATE_TYPES we care about. | |
1442 | */ | |
1443 | recommended_min += pageblock_nr_pages * nr_zones * | |
1444 | MIGRATE_PCPTYPES * MIGRATE_PCPTYPES; | |
1445 | ||
1446 | /* never allow reserving more than 5% of the lowmem */ | |
1447 | recommended_min = min(recommended_min, | |
1448 | (unsigned long) nr_free_buffer_pages() / 20); | |
1449 | recommended_min <<= (PAGE_SHIFT-10); | |
1450 | ||
1451 | if (recommended_min > min_free_kbytes) { | |
1452 | if (user_min_free_kbytes >= 0) | |
1453 | pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n", | |
1454 | min_free_kbytes, recommended_min); | |
1455 | ||
1456 | min_free_kbytes = recommended_min; | |
1457 | } | |
1458 | setup_per_zone_wmarks(); | |
1459 | } | |
1460 | ||
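| /* Start or stop the khugepaged thread to match khugepaged_enabled(), | |
| * raising min_free_kbytes when the thread is started. */ | |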
1461 | int start_stop_khugepaged(void) | |
1462 | { | |
1463 | static struct task_struct *khugepaged_thread __read_mostly; | |
1464 | static DEFINE_MUTEX(khugepaged_mutex); | |
1465 | int err = 0; | |
1466 | ||
1467 | mutex_lock(&khugepaged_mutex); | |
1468 | if (khugepaged_enabled()) { | |
1469 | if (!khugepaged_thread) | |
1470 | khugepaged_thread = kthread_run(khugepaged, NULL, | |
1471 | "khugepaged"); | |
1472 | if (IS_ERR(khugepaged_thread)) { | |
1473 | pr_err("khugepaged: kthread_run(khugepaged) failed\n"); | |
1474 | err = PTR_ERR(khugepaged_thread); | |
1475 | khugepaged_thread = NULL; | |
1476 | goto fail; | |
1477 | } | |
1478 | ||
1479 | if (!list_empty(&khugepaged_scan.mm_head)) | |
1480 | wake_up_interruptible(&khugepaged_wait); | |
1481 | ||
1482 | set_recommended_min_free_kbytes(); | |
1483 | } else if (khugepaged_thread) { | |
1484 | kthread_stop(khugepaged_thread); | |
1485 | khugepaged_thread = NULL; | |
1486 | } | |
1487 | fail: | |
1488 | mutex_unlock(&khugepaged_mutex); | |
1489 | return err; | |
1490 | } |