vmscan: remove dead code in shrink_inactive_list()
1 /*
2 * linux/mm/vmscan.c
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 *
6 * Swap reorganised 29.12.95, Stephen Tweedie.
7 * kswapd added: 7.1.96 sct
8 * Removed kswapd_ctl limits, and swap out as many pages as needed
9 * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
10 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
11 * Multiqueue VM started 5.8.00, Rik van Riel.
12 */
13
14 #include <linux/mm.h>
15 #include <linux/module.h>
16 #include <linux/gfp.h>
17 #include <linux/kernel_stat.h>
18 #include <linux/swap.h>
19 #include <linux/pagemap.h>
20 #include <linux/init.h>
21 #include <linux/highmem.h>
22 #include <linux/vmstat.h>
23 #include <linux/file.h>
24 #include <linux/writeback.h>
25 #include <linux/blkdev.h>
26 #include <linux/buffer_head.h> /* for try_to_release_page(),
27 buffer_heads_over_limit */
28 #include <linux/mm_inline.h>
29 #include <linux/pagevec.h>
30 #include <linux/backing-dev.h>
31 #include <linux/rmap.h>
32 #include <linux/topology.h>
33 #include <linux/cpu.h>
34 #include <linux/cpuset.h>
35 #include <linux/notifier.h>
36 #include <linux/rwsem.h>
37 #include <linux/delay.h>
38 #include <linux/kthread.h>
39 #include <linux/freezer.h>
40 #include <linux/memcontrol.h>
41 #include <linux/delayacct.h>
42 #include <linux/sysctl.h>
43
44 #include <asm/tlbflush.h>
45 #include <asm/div64.h>
46
47 #include <linux/swapops.h>
48
49 #include "internal.h"
50
51 #define CREATE_TRACE_POINTS
52 #include <trace/events/vmscan.h>
53
54 enum lumpy_mode {
55 LUMPY_MODE_NONE,
56 LUMPY_MODE_ASYNC,
57 LUMPY_MODE_SYNC,
58 };
59
60 struct scan_control {
61 /* Incremented by the number of inactive pages that were scanned */
62 unsigned long nr_scanned;
63
64 /* Number of pages freed so far during a call to shrink_zones() */
65 unsigned long nr_reclaimed;
66
67 /* How many pages shrink_list() should reclaim */
68 unsigned long nr_to_reclaim;
69
70 unsigned long hibernation_mode;
71
72 /* This context's GFP mask */
73 gfp_t gfp_mask;
74
75 int may_writepage;
76
77 /* Can mapped pages be reclaimed? */
78 int may_unmap;
79
80 /* Can pages be swapped as part of reclaim? */
81 int may_swap;
82
83 int swappiness;
84
85 int order;
86
87 /*
88 * Intend to reclaim enough contiguous memory rather than just a
89 * sufficient amount of memory, i.e. the mode for high-order allocations.
90 */
91 enum lumpy_mode lumpy_reclaim_mode;
92
93 /* Which cgroup do we reclaim from */
94 struct mem_cgroup *mem_cgroup;
95
96 /*
97 * Nodemask of nodes allowed by the caller. If NULL, all nodes
98 * are scanned.
99 */
100 nodemask_t *nodemask;
101 };
102
103 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
104
105 #ifdef ARCH_HAS_PREFETCH
106 #define prefetch_prev_lru_page(_page, _base, _field) \
107 do { \
108 if ((_page)->lru.prev != _base) { \
109 struct page *prev; \
110 \
111 prev = lru_to_page(&(_page->lru)); \
112 prefetch(&prev->_field); \
113 } \
114 } while (0)
115 #else
116 #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
117 #endif
118
119 #ifdef ARCH_HAS_PREFETCHW
120 #define prefetchw_prev_lru_page(_page, _base, _field) \
121 do { \
122 if ((_page)->lru.prev != _base) { \
123 struct page *prev; \
124 \
125 prev = lru_to_page(&(_page->lru)); \
126 prefetchw(&prev->_field); \
127 } \
128 } while (0)
129 #else
130 #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
131 #endif
132
133 /*
134 * From 0 .. 100. Higher means more swappy.
135 */
136 int vm_swappiness = 60;
137 long vm_total_pages; /* The total number of pages which the VM controls */
138
139 static LIST_HEAD(shrinker_list);
140 static DECLARE_RWSEM(shrinker_rwsem);
141
142 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
143 #define scanning_global_lru(sc) (!(sc)->mem_cgroup)
144 #else
145 #define scanning_global_lru(sc) (1)
146 #endif
147
148 static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
149 struct scan_control *sc)
150 {
151 if (!scanning_global_lru(sc))
152 return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);
153
154 return &zone->reclaim_stat;
155 }
156
157 static unsigned long zone_nr_lru_pages(struct zone *zone,
158 struct scan_control *sc, enum lru_list lru)
159 {
160 if (!scanning_global_lru(sc))
161 return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);
162
163 return zone_page_state(zone, NR_LRU_BASE + lru);
164 }
165
166
167 /*
168 * Add a shrinker callback to be called from the vm
169 */
170 void register_shrinker(struct shrinker *shrinker)
171 {
172 shrinker->nr = 0;
173 down_write(&shrinker_rwsem);
174 list_add_tail(&shrinker->list, &shrinker_list);
175 up_write(&shrinker_rwsem);
176 }
177 EXPORT_SYMBOL(register_shrinker);
178
179 /*
180 * Remove one
181 */
182 void unregister_shrinker(struct shrinker *shrinker)
183 {
184 down_write(&shrinker_rwsem);
185 list_del(&shrinker->list);
186 up_write(&shrinker_rwsem);
187 }
188 EXPORT_SYMBOL(unregister_shrinker);
189
190 #define SHRINK_BATCH 128
191 /*
192 * Call the shrink functions to age shrinkable caches
193 *
194 * Here we assume it costs one seek to replace a lru page and that it also
195 * takes a seek to recreate a cache object. With this in mind we age equal
196 * percentages of the lru and ageable caches. This should balance the seeks
197 * generated by these structures.
198 *
199 * If the vm encountered mapped pages on the LRU, it increases the pressure on
200 * slab to avoid swapping.
201 *
202 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
203 *
204 * `lru_pages' represents the number of on-LRU pages in all the zones which
205 * are eligible for the caller's allocation attempt. It is used for balancing
206 * slab reclaim versus page reclaim.
207 *
208 * Returns the number of slab objects which we shrunk.
209 */
210 unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
211 unsigned long lru_pages)
212 {
213 struct shrinker *shrinker;
214 unsigned long ret = 0;
215
216 if (scanned == 0)
217 scanned = SWAP_CLUSTER_MAX;
218
219 if (!down_read_trylock(&shrinker_rwsem))
220 return 1; /* Assume we'll be able to shrink next time */
221
222 list_for_each_entry(shrinker, &shrinker_list, list) {
223 unsigned long long delta;
224 unsigned long total_scan;
225 unsigned long max_pass;
226
227 max_pass = (*shrinker->shrink)(shrinker, 0, gfp_mask);
228 delta = (4 * scanned) / shrinker->seeks;
229 delta *= max_pass;
230 do_div(delta, lru_pages + 1);
231 shrinker->nr += delta;
232 if (shrinker->nr < 0) {
233 printk(KERN_ERR "shrink_slab: %pF negative objects to "
234 "delete nr=%ld\n",
235 shrinker->shrink, shrinker->nr);
236 shrinker->nr = max_pass;
237 }
238
239 /*
240 * Avoid risking looping forever due to an overly large nr value:
241 * never try to free more than twice the estimated number of
242 * freeable entries.
243 */
244 if (shrinker->nr > max_pass * 2)
245 shrinker->nr = max_pass * 2;
246
247 total_scan = shrinker->nr;
248 shrinker->nr = 0;
249
250 while (total_scan >= SHRINK_BATCH) {
251 long this_scan = SHRINK_BATCH;
252 int shrink_ret;
253 int nr_before;
254
255 nr_before = (*shrinker->shrink)(shrinker, 0, gfp_mask);
256 shrink_ret = (*shrinker->shrink)(shrinker, this_scan,
257 gfp_mask);
258 if (shrink_ret == -1)
259 break;
260 if (shrink_ret < nr_before)
261 ret += nr_before - shrink_ret;
262 count_vm_events(SLABS_SCANNED, this_scan);
263 total_scan -= this_scan;
264
265 cond_resched();
266 }
267
268 shrinker->nr += total_scan;
269 }
270 up_read(&shrinker_rwsem);
271 return ret;
272 }
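/*
 * A worked example of the balancing above, with illustrative numbers rather
 * than figures from any real workload: suppose scanned = 1024 LRU pages, a
 * shrinker with seeks = 2 (the usual DEFAULT_SEEKS value), max_pass = 10000
 * freeable objects and lru_pages = 100000.  Then
 *
 *	delta = (4 * 1024 / 2) * 10000 / (100000 + 1) = 204
 *
 * so the shrinker is asked to scan about 2% of its objects while the page
 * LRU scanned about 1% of its pages.  Since 204 >= SHRINK_BATCH, one batch
 * of 128 objects is scanned immediately and the remaining 76 are carried
 * over in shrinker->nr for the next call.
 */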
273
274 static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc,
275 bool sync)
276 {
277 enum lumpy_mode mode = sync ? LUMPY_MODE_SYNC : LUMPY_MODE_ASYNC;
278
279 /*
280 * Some reclaim has already failed. It is not worth trying
281 * synchronous lumpy reclaim.
282 */
283 if (sync && sc->lumpy_reclaim_mode == LUMPY_MODE_NONE)
284 return;
285
286 /*
287 * If we need a large contiguous chunk of memory, or have
288 * trouble getting a small set of contiguous pages, we
289 * will reclaim both active and inactive pages.
290 */
291 if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
292 sc->lumpy_reclaim_mode = mode;
293 else if (sc->order && priority < DEF_PRIORITY - 2)
294 sc->lumpy_reclaim_mode = mode;
295 else
296 sc->lumpy_reclaim_mode = LUMPY_MODE_NONE;
297 }
298
299 static void disable_lumpy_reclaim_mode(struct scan_control *sc)
300 {
301 sc->lumpy_reclaim_mode = LUMPY_MODE_NONE;
302 }
303
304 static inline int is_page_cache_freeable(struct page *page)
305 {
306 /*
307 * A freeable page cache page is referenced only by the caller
308 * that isolated the page, the page cache radix tree and
309 * optional buffer heads at page->private.
310 */
311 return page_count(page) - page_has_private(page) == 2;
312 }
313
314 static int may_write_to_queue(struct backing_dev_info *bdi,
315 struct scan_control *sc)
316 {
317 if (current->flags & PF_SWAPWRITE)
318 return 1;
319 if (!bdi_write_congested(bdi))
320 return 1;
321 if (bdi == current->backing_dev_info)
322 return 1;
323
324 /* lumpy reclaim for hugepages often needs a lot of writes */
325 if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
326 return 1;
327 return 0;
328 }
329
330 /*
331 * We detected a synchronous write error writing a page out. Probably
332 * -ENOSPC. We need to propagate that into the address_space for a subsequent
333 * fsync(), msync() or close().
334 *
335 * The tricky part is that after writepage we cannot touch the mapping: nothing
336 * prevents it from being freed up. But we have a ref on the page and once
337 * that page is locked, the mapping is pinned.
338 *
339 * We're allowed to run sleeping lock_page() here because we know the caller has
340 * __GFP_FS.
341 */
342 static void handle_write_error(struct address_space *mapping,
343 struct page *page, int error)
344 {
345 lock_page_nosync(page);
346 if (page_mapping(page) == mapping)
347 mapping_set_error(mapping, error);
348 unlock_page(page);
349 }
350
351 /* possible outcome of pageout() */
352 typedef enum {
353 /* failed to write page out, page is locked */
354 PAGE_KEEP,
355 /* move page to the active list, page is locked */
356 PAGE_ACTIVATE,
357 /* page has been sent to the disk successfully, page is unlocked */
358 PAGE_SUCCESS,
359 /* page is clean and locked */
360 PAGE_CLEAN,
361 } pageout_t;
362
363 /*
364 * pageout is called by shrink_page_list() for each dirty page.
365 * Calls ->writepage().
366 */
367 static pageout_t pageout(struct page *page, struct address_space *mapping,
368 struct scan_control *sc)
369 {
370 /*
371 * If the page is dirty, only perform writeback if that write
372 * will be non-blocking, to prevent this allocation from being
373 * stalled by pagecache activity. But note that there may be
374 * stalls if we need to run get_block(). We could test
375 * PagePrivate for that.
376 *
377 * If this process is currently in __generic_file_aio_write() against
378 * this page's queue, we can perform writeback even if that
379 * will block.
380 *
381 * If the page is swapcache, write it back even if that would
382 * block, for some throttling. This happens by accident, because
383 * swap_backing_dev_info is bust: it doesn't reflect the
384 * congestion state of the swapdevs. Easy to fix, if needed.
385 */
386 if (!is_page_cache_freeable(page))
387 return PAGE_KEEP;
388 if (!mapping) {
389 /*
390 * Some data journaling orphaned pages can have
391 * page->mapping == NULL while being dirty with clean buffers.
392 */
393 if (page_has_private(page)) {
394 if (try_to_free_buffers(page)) {
395 ClearPageDirty(page);
396 printk("%s: orphaned page\n", __func__);
397 return PAGE_CLEAN;
398 }
399 }
400 return PAGE_KEEP;
401 }
402 if (mapping->a_ops->writepage == NULL)
403 return PAGE_ACTIVATE;
404 if (!may_write_to_queue(mapping->backing_dev_info, sc)) {
405 disable_lumpy_reclaim_mode(sc);
406 return PAGE_KEEP;
407 }
408
409 if (clear_page_dirty_for_io(page)) {
410 int res;
411 struct writeback_control wbc = {
412 .sync_mode = WB_SYNC_NONE,
413 .nr_to_write = SWAP_CLUSTER_MAX,
414 .range_start = 0,
415 .range_end = LLONG_MAX,
416 .for_reclaim = 1,
417 };
418
419 SetPageReclaim(page);
420 res = mapping->a_ops->writepage(page, &wbc);
421 if (res < 0)
422 handle_write_error(mapping, page, res);
423 if (res == AOP_WRITEPAGE_ACTIVATE) {
424 ClearPageReclaim(page);
425 return PAGE_ACTIVATE;
426 }
427
428 /*
429 * Wait on writeback if requested to. This happens when
430 * direct reclaiming a large contiguous area and the
431 * first attempt to free a range of pages fails.
432 */
433 if (PageWriteback(page) &&
434 sc->lumpy_reclaim_mode == LUMPY_MODE_SYNC)
435 wait_on_page_writeback(page);
436
437 if (!PageWriteback(page)) {
438 /* synchronous write or broken a_ops? */
439 ClearPageReclaim(page);
440 }
441 trace_mm_vmscan_writepage(page,
442 trace_reclaim_flags(page, sc->lumpy_reclaim_mode));
443 inc_zone_page_state(page, NR_VMSCAN_WRITE);
444 return PAGE_SUCCESS;
445 }
446
447 return PAGE_CLEAN;
448 }
449
450 /*
451 * Same as remove_mapping, but if the page is removed from the mapping, it
452 * gets returned with a refcount of 0.
453 */
454 static int __remove_mapping(struct address_space *mapping, struct page *page)
455 {
456 BUG_ON(!PageLocked(page));
457 BUG_ON(mapping != page_mapping(page));
458
459 spin_lock_irq(&mapping->tree_lock);
460 /*
461 * The non-racy check for a busy page.
462 *
463 * Must be careful with the order of the tests. When someone has
464 * a ref to the page, it may be possible that they dirty it then
465 * drop the reference. So if PageDirty is tested before page_count
466 * here, then the following race may occur:
467 *
468 * get_user_pages(&page);
469 * [user mapping goes away]
470 * write_to(page);
471 * !PageDirty(page) [good]
472 * SetPageDirty(page);
473 * put_page(page);
474 * !page_count(page) [good, discard it]
475 *
476 * [oops, our write_to data is lost]
477 *
478 * Reversing the order of the tests ensures such a situation cannot
479 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
480 * load is not satisfied before that of page->_count.
481 *
482 * Note that if SetPageDirty is always performed via set_page_dirty,
483 * and thus under tree_lock, then this ordering is not required.
484 */
485 if (!page_freeze_refs(page, 2))
486 goto cannot_free;
487 /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
488 if (unlikely(PageDirty(page))) {
489 page_unfreeze_refs(page, 2);
490 goto cannot_free;
491 }
492
493 if (PageSwapCache(page)) {
494 swp_entry_t swap = { .val = page_private(page) };
495 __delete_from_swap_cache(page);
496 spin_unlock_irq(&mapping->tree_lock);
497 swapcache_free(swap, page);
498 } else {
499 __remove_from_page_cache(page);
500 spin_unlock_irq(&mapping->tree_lock);
501 mem_cgroup_uncharge_cache_page(page);
502 }
503
504 return 1;
505
506 cannot_free:
507 spin_unlock_irq(&mapping->tree_lock);
508 return 0;
509 }
510
511 /*
512 * Attempt to detach a locked page from its ->mapping. If it is dirty or if
513 * someone else has a ref on the page, abort and return 0. If it was
514 * successfully detached, return 1. Assumes the caller has a single ref on
515 * this page.
516 */
517 int remove_mapping(struct address_space *mapping, struct page *page)
518 {
519 if (__remove_mapping(mapping, page)) {
520 /*
521 * Unfreezing the refcount with 1 rather than 2 effectively
522 * drops the pagecache ref for us without requiring another
523 * atomic operation.
524 */
525 page_unfreeze_refs(page, 1);
526 return 1;
527 }
528 return 0;
529 }
530
531 /**
532 * putback_lru_page - put previously isolated page onto appropriate LRU list
533 * @page: page to be put back to appropriate lru list
534 *
535 * Add previously isolated @page to appropriate LRU list.
536 * Page may still be unevictable for other reasons.
537 *
538 * lru_lock must not be held, interrupts must be enabled.
539 */
540 void putback_lru_page(struct page *page)
541 {
542 int lru;
543 int active = !!TestClearPageActive(page);
544 int was_unevictable = PageUnevictable(page);
545
546 VM_BUG_ON(PageLRU(page));
547
548 redo:
549 ClearPageUnevictable(page);
550
551 if (page_evictable(page, NULL)) {
552 /*
553 * For evictable pages, we can use the cache.
554 * In the event of a race, the worst case is that we end up with
555 * an unevictable page on the [in]active list.
556 * We know how to handle that.
557 */
558 lru = active + page_lru_base_type(page);
559 lru_cache_add_lru(page, lru);
560 } else {
561 /*
562 * Put unevictable pages directly on zone's unevictable
563 * list.
564 */
565 lru = LRU_UNEVICTABLE;
566 add_page_to_unevictable_list(page);
567 /*
568 * When racing with an mlock clearing (page is
569 * unlocked), make sure that if the other thread does
570 * not observe our setting of PG_lru and fails
571 * isolation, we see PG_mlocked cleared below and move
572 * the page back to the evictable list.
573 *
574 * The other side is TestClearPageMlocked().
575 */
576 smp_mb();
577 }
578
579 /*
580 * The page's status can change while we move it among the LRU lists.
581 * If an evictable page ends up on the unevictable list it will never
582 * be freed. To avoid that, check again after we have added it to the list.
583 */
584 if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
585 if (!isolate_lru_page(page)) {
586 put_page(page);
587 goto redo;
588 }
589 /* This means someone else dropped this page from the LRU;
590 * it will be freed or put back on the LRU again. There is
591 * nothing to do here.
592 */
593 }
594
595 if (was_unevictable && lru != LRU_UNEVICTABLE)
596 count_vm_event(UNEVICTABLE_PGRESCUED);
597 else if (!was_unevictable && lru == LRU_UNEVICTABLE)
598 count_vm_event(UNEVICTABLE_PGCULLED);
599
600 put_page(page); /* drop ref from isolate */
601 }
602
603 enum page_references {
604 PAGEREF_RECLAIM,
605 PAGEREF_RECLAIM_CLEAN,
606 PAGEREF_KEEP,
607 PAGEREF_ACTIVATE,
608 };
609
610 static enum page_references page_check_references(struct page *page,
611 struct scan_control *sc)
612 {
613 int referenced_ptes, referenced_page;
614 unsigned long vm_flags;
615
616 referenced_ptes = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);
617 referenced_page = TestClearPageReferenced(page);
618
619 /* Lumpy reclaim - ignore references */
620 if (sc->lumpy_reclaim_mode != LUMPY_MODE_NONE)
621 return PAGEREF_RECLAIM;
622
623 /*
624 * Mlock lost the isolation race with us. Let try_to_unmap()
625 * move the page to the unevictable list.
626 */
627 if (vm_flags & VM_LOCKED)
628 return PAGEREF_RECLAIM;
629
630 if (referenced_ptes) {
631 if (PageAnon(page))
632 return PAGEREF_ACTIVATE;
633 /*
634 * All mapped pages start out with page table
635 * references from the instantiating fault, so we need
636 * to look twice if a mapped file page is used more
637 * than once.
638 *
639 * Mark it and spare it for another trip around the
640 * inactive list. Another page table reference will
641 * lead to its activation.
642 *
643 * Note: the mark is set for activated pages as well
644 * so that recently deactivated but used pages are
645 * quickly recovered.
646 */
647 SetPageReferenced(page);
648
649 if (referenced_page)
650 return PAGEREF_ACTIVATE;
651
652 return PAGEREF_KEEP;
653 }
654
655 /* Reclaim if clean, defer dirty pages to writeback */
656 if (referenced_page)
657 return PAGEREF_RECLAIM_CLEAN;
658
659 return PAGEREF_RECLAIM;
660 }
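/*
 * Illustration of the file-page rule above for a hypothetical page: on its
 * first trip around the inactive list a freshly faulted-in file page still
 * carries the pte reference from the instantiating fault, so referenced_ptes
 * is nonzero, PG_referenced gets set and the page is kept (PAGEREF_KEEP).
 * If it is referenced again by the time it next reaches the tail of the
 * list, referenced_page is now true as well and the page is promoted
 * (PAGEREF_ACTIVATE); if not, the old mark alone only buys it
 * PAGEREF_RECLAIM_CLEAN treatment.
 */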
661
662 static noinline_for_stack void free_page_list(struct list_head *free_pages)
663 {
664 struct pagevec freed_pvec;
665 struct page *page, *tmp;
666
667 pagevec_init(&freed_pvec, 1);
668
669 list_for_each_entry_safe(page, tmp, free_pages, lru) {
670 list_del(&page->lru);
671 if (!pagevec_add(&freed_pvec, page)) {
672 __pagevec_free(&freed_pvec);
673 pagevec_reinit(&freed_pvec);
674 }
675 }
676
677 pagevec_free(&freed_pvec);
678 }
679
680 /*
681 * shrink_page_list() returns the number of reclaimed pages
682 */
683 static unsigned long shrink_page_list(struct list_head *page_list,
684 struct scan_control *sc)
685 {
686 LIST_HEAD(ret_pages);
687 LIST_HEAD(free_pages);
688 int pgactivate = 0;
689 unsigned long nr_reclaimed = 0;
690
691 cond_resched();
692
693 while (!list_empty(page_list)) {
694 enum page_references references;
695 struct address_space *mapping;
696 struct page *page;
697 int may_enter_fs;
698
699 cond_resched();
700
701 page = lru_to_page(page_list);
702 list_del(&page->lru);
703
704 if (!trylock_page(page))
705 goto keep;
706
707 VM_BUG_ON(PageActive(page));
708
709 sc->nr_scanned++;
710
711 if (unlikely(!page_evictable(page, NULL)))
712 goto cull_mlocked;
713
714 if (!sc->may_unmap && page_mapped(page))
715 goto keep_locked;
716
717 /* Double the slab pressure for mapped and swapcache pages */
718 if (page_mapped(page) || PageSwapCache(page))
719 sc->nr_scanned++;
720
721 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
722 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
723
724 if (PageWriteback(page)) {
725 /*
726 * Synchronous reclaim is performed in two passes,
727 * first an asynchronous pass over the list to
728 * start parallel writeback, and a second synchronous
729 * pass to wait for the IO to complete. Wait here
730 * for any page for which writeback has already
731 * started.
732 */
733 if (sc->lumpy_reclaim_mode == LUMPY_MODE_SYNC &&
734 may_enter_fs)
735 wait_on_page_writeback(page);
736 else {
737 unlock_page(page);
738 goto keep_lumpy;
739 }
740 }
741
742 references = page_check_references(page, sc);
743 switch (references) {
744 case PAGEREF_ACTIVATE:
745 goto activate_locked;
746 case PAGEREF_KEEP:
747 goto keep_locked;
748 case PAGEREF_RECLAIM:
749 case PAGEREF_RECLAIM_CLEAN:
750 ; /* try to reclaim the page below */
751 }
752
753 /*
754 * Anonymous process memory has backing store?
755 * Try to allocate it some swap space here.
756 */
757 if (PageAnon(page) && !PageSwapCache(page)) {
758 if (!(sc->gfp_mask & __GFP_IO))
759 goto keep_locked;
760 if (!add_to_swap(page))
761 goto activate_locked;
762 may_enter_fs = 1;
763 }
764
765 mapping = page_mapping(page);
766
767 /*
768 * The page is mapped into the page tables of one or more
769 * processes. Try to unmap it here.
770 */
771 if (page_mapped(page) && mapping) {
772 switch (try_to_unmap(page, TTU_UNMAP)) {
773 case SWAP_FAIL:
774 goto activate_locked;
775 case SWAP_AGAIN:
776 goto keep_locked;
777 case SWAP_MLOCK:
778 goto cull_mlocked;
779 case SWAP_SUCCESS:
780 ; /* try to free the page below */
781 }
782 }
783
784 if (PageDirty(page)) {
785 if (references == PAGEREF_RECLAIM_CLEAN)
786 goto keep_locked;
787 if (!may_enter_fs)
788 goto keep_locked;
789 if (!sc->may_writepage)
790 goto keep_locked;
791
792 /* Page is dirty, try to write it out here */
793 switch (pageout(page, mapping, sc)) {
794 case PAGE_KEEP:
795 goto keep_locked;
796 case PAGE_ACTIVATE:
797 goto activate_locked;
798 case PAGE_SUCCESS:
799 if (PageWriteback(page))
800 goto keep_lumpy;
801 if (PageDirty(page))
802 goto keep;
803
804 /*
805 * A synchronous write - probably a ramdisk. Go
806 * ahead and try to reclaim the page.
807 */
808 if (!trylock_page(page))
809 goto keep;
810 if (PageDirty(page) || PageWriteback(page))
811 goto keep_locked;
812 mapping = page_mapping(page);
813 case PAGE_CLEAN:
814 ; /* try to free the page below */
815 }
816 }
817
818 /*
819 * If the page has buffers, try to free the buffer mappings
820 * associated with this page. If we succeed we try to free
821 * the page as well.
822 *
823 * We do this even if the page is PageDirty().
824 * try_to_release_page() does not perform I/O, but it is
825 * possible for a page to have PageDirty set, but it is actually
826 * clean (all its buffers are clean). This happens if the
827 * buffers were written out directly, with submit_bh(). ext3
828 * will do this, as well as the blockdev mapping.
829 * try_to_release_page() will discover that cleanness and will
830 * drop the buffers and mark the page clean - it can be freed.
831 *
832 * Rarely, pages can have buffers and no ->mapping. These are
833 * the pages which were not successfully invalidated in
834 * truncate_complete_page(). We try to drop those buffers here
835 * and if that worked, and the page is no longer mapped into
836 * process address space (page_count == 1) it can be freed.
837 * Otherwise, leave the page on the LRU so it is swappable.
838 */
839 if (page_has_private(page)) {
840 if (!try_to_release_page(page, sc->gfp_mask))
841 goto activate_locked;
842 if (!mapping && page_count(page) == 1) {
843 unlock_page(page);
844 if (put_page_testzero(page))
845 goto free_it;
846 else {
847 /*
848 * rare race with speculative reference.
849 * the speculative reference will free
850 * this page shortly, so we may
851 * increment nr_reclaimed here (and
852 * leave it off the LRU).
853 */
854 nr_reclaimed++;
855 continue;
856 }
857 }
858 }
859
860 if (!mapping || !__remove_mapping(mapping, page))
861 goto keep_locked;
862
863 /*
864 * At this point, we have no other references and there is
865 * no way to pick any more up (removed from LRU, removed
866 * from pagecache). Can use non-atomic bitops now (and
867 * we obviously don't have to worry about waking up a process
868 * waiting on the page lock, because there are no references.
869 */
870 __clear_page_locked(page);
871 free_it:
872 nr_reclaimed++;
873
874 /*
875 * Is there a need to periodically call free_page_list()? It would
876 * appear not, as the counts should be low.
877 */
878 list_add(&page->lru, &free_pages);
879 continue;
880
881 cull_mlocked:
882 if (PageSwapCache(page))
883 try_to_free_swap(page);
884 unlock_page(page);
885 putback_lru_page(page);
886 disable_lumpy_reclaim_mode(sc);
887 continue;
888
889 activate_locked:
890 /* Not a candidate for swapping, so reclaim swap space. */
891 if (PageSwapCache(page) && vm_swap_full())
892 try_to_free_swap(page);
893 VM_BUG_ON(PageActive(page));
894 SetPageActive(page);
895 pgactivate++;
896 keep_locked:
897 unlock_page(page);
898 keep:
899 disable_lumpy_reclaim_mode(sc);
900 keep_lumpy:
901 list_add(&page->lru, &ret_pages);
902 VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
903 }
904
905 free_page_list(&free_pages);
906
907 list_splice(&ret_pages, page_list);
908 count_vm_events(PGACTIVATE, pgactivate);
909 return nr_reclaimed;
910 }
911
912 /*
913 * Attempt to remove the specified page from its LRU. Only take this page
914 * if it is of the appropriate PageActive status. Pages which are being
915 * freed elsewhere are also ignored.
916 *
917 * page: page to consider
918 * mode: one of the LRU isolation modes defined above
919 *
920 * returns 0 on success, -ve errno on failure.
921 */
922 int __isolate_lru_page(struct page *page, int mode, int file)
923 {
924 int ret = -EINVAL;
925
926 /* Only take pages on the LRU. */
927 if (!PageLRU(page))
928 return ret;
929
930 /*
931 * When checking the active state, we need to be sure we are
932 * dealing with comparable boolean values. Take the logical not
933 * of each.
934 */
935 if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
936 return ret;
937
938 if (mode != ISOLATE_BOTH && page_is_file_cache(page) != file)
939 return ret;
940
941 /*
942 * When this function is being called for lumpy reclaim, we
943 * initially look into all LRU pages, active, inactive and
944 * unevictable; only give shrink_page_list evictable pages.
945 */
946 if (PageUnevictable(page))
947 return ret;
948
949 ret = -EBUSY;
950
951 if (likely(get_page_unless_zero(page))) {
952 /*
953 * Be careful not to clear PageLRU until after we're
954 * sure the page is not being freed elsewhere -- the
955 * page release code relies on it.
956 */
957 ClearPageLRU(page);
958 ret = 0;
959 }
960
961 return ret;
962 }
963
964 /*
965 * zone->lru_lock is heavily contended. Some of the functions that
966 * shrink the lists perform better by taking out a batch of pages
967 * and working on them outside the LRU lock.
968 *
969 * For pagecache intensive workloads, this function is the hottest
970 * spot in the kernel (apart from copy_*_user functions).
971 *
972 * Appropriate locks must be held before calling this function.
973 *
974 * @nr_to_scan: The number of pages to look through on the list.
975 * @src: The LRU list to pull pages off.
976 * @dst: The temp list to put pages on to.
977 * @scanned: The number of pages that were scanned.
978 * @order: The caller's attempted allocation order
979 * @mode: One of the LRU isolation modes
980 * @file: True [1] if isolating file [!anon] pages
981 *
982 * returns how many pages were moved onto *@dst.
983 */
984 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
985 struct list_head *src, struct list_head *dst,
986 unsigned long *scanned, int order, int mode, int file)
987 {
988 unsigned long nr_taken = 0;
989 unsigned long nr_lumpy_taken = 0;
990 unsigned long nr_lumpy_dirty = 0;
991 unsigned long nr_lumpy_failed = 0;
992 unsigned long scan;
993
994 for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
995 struct page *page;
996 unsigned long pfn;
997 unsigned long end_pfn;
998 unsigned long page_pfn;
999 int zone_id;
1000
1001 page = lru_to_page(src);
1002 prefetchw_prev_lru_page(page, src, flags);
1003
1004 VM_BUG_ON(!PageLRU(page));
1005
1006 switch (__isolate_lru_page(page, mode, file)) {
1007 case 0:
1008 list_move(&page->lru, dst);
1009 mem_cgroup_del_lru(page);
1010 nr_taken++;
1011 break;
1012
1013 case -EBUSY:
1014 /* else it is being freed elsewhere */
1015 list_move(&page->lru, src);
1016 mem_cgroup_rotate_lru_list(page, page_lru(page));
1017 continue;
1018
1019 default:
1020 BUG();
1021 }
1022
1023 if (!order)
1024 continue;
1025
1026 /*
1027 * Attempt to take all pages in the order aligned region
1028 * surrounding the tag page. Only take those pages of
1029 * the same active state as that tag page. We may safely
1030 * round the target page pfn down to the requested order
1031 * as the mem_map is guaranteed valid out to MAX_ORDER;
1032 * if a page is in a different zone we will detect it
1033 * from its zone id and abort this block scan.
1034 */
1035 zone_id = page_zone_id(page);
1036 page_pfn = page_to_pfn(page);
1037 pfn = page_pfn & ~((1 << order) - 1);
1038 end_pfn = pfn + (1 << order);
1039 for (; pfn < end_pfn; pfn++) {
1040 struct page *cursor_page;
1041
1042 /* The target page is in the block, ignore it. */
1043 if (unlikely(pfn == page_pfn))
1044 continue;
1045
1046 /* Avoid holes within the zone. */
1047 if (unlikely(!pfn_valid_within(pfn)))
1048 break;
1049
1050 cursor_page = pfn_to_page(pfn);
1051
1052 /* Check that we have not crossed a zone boundary. */
1053 if (unlikely(page_zone_id(cursor_page) != zone_id))
1054 continue;
1055
1056 /*
1057 * If we don't have enough swap space, reclaiming
1058 * anon pages which don't already have a swap slot is
1059 * pointless.
1060 */
1061 if (nr_swap_pages <= 0 && PageAnon(cursor_page) &&
1062 !PageSwapCache(cursor_page))
1063 continue;
1064
1065 if (__isolate_lru_page(cursor_page, mode, file) == 0) {
1066 list_move(&cursor_page->lru, dst);
1067 mem_cgroup_del_lru(cursor_page);
1068 nr_taken++;
1069 nr_lumpy_taken++;
1070 if (PageDirty(cursor_page))
1071 nr_lumpy_dirty++;
1072 scan++;
1073 } else {
1074 if (mode == ISOLATE_BOTH &&
1075 page_count(cursor_page))
1076 nr_lumpy_failed++;
1077 }
1078 }
1079 }
1080
1081 *scanned = scan;
1082
1083 trace_mm_vmscan_lru_isolate(order,
1084 nr_to_scan, scan,
1085 nr_taken,
1086 nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed,
1087 mode);
1088 return nr_taken;
1089 }
1090
1091 static unsigned long isolate_pages_global(unsigned long nr,
1092 struct list_head *dst,
1093 unsigned long *scanned, int order,
1094 int mode, struct zone *z,
1095 int active, int file)
1096 {
1097 int lru = LRU_BASE;
1098 if (active)
1099 lru += LRU_ACTIVE;
1100 if (file)
1101 lru += LRU_FILE;
1102 return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
1103 mode, file);
1104 }
1105
1106 /*
1107 * clear_active_flags() is a helper for shrink_active_list(), clearing
1108 * any active bits from the pages in the list.
1109 */
1110 static unsigned long clear_active_flags(struct list_head *page_list,
1111 unsigned int *count)
1112 {
1113 int nr_active = 0;
1114 int lru;
1115 struct page *page;
1116
1117 list_for_each_entry(page, page_list, lru) {
1118 lru = page_lru_base_type(page);
1119 if (PageActive(page)) {
1120 lru += LRU_ACTIVE;
1121 ClearPageActive(page);
1122 nr_active++;
1123 }
1124 if (count)
1125 count[lru]++;
1126 }
1127
1128 return nr_active;
1129 }
1130
1131 /**
1132 * isolate_lru_page - tries to isolate a page from its LRU list
1133 * @page: page to isolate from its LRU list
1134 *
1135 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
1136 * vmstat statistic corresponding to whatever LRU list the page was on.
1137 *
1138 * Returns 0 if the page was removed from an LRU list.
1139 * Returns -EBUSY if the page was not on an LRU list.
1140 *
1141 * The returned page will have PageLRU() cleared. If it was found on
1142 * the active list, it will have PageActive set. If it was found on
1143 * the unevictable list, it will have the PageUnevictable bit set. That flag
1144 * may need to be cleared by the caller before letting the page go.
1145 *
1146 * The vmstat statistic corresponding to the list on which the page was
1147 * found will be decremented.
1148 *
1149 * Restrictions:
1150 * (1) Must be called with an elevated refcount on the page. This is a
1151 * fundamental difference from isolate_lru_pages (which is called
1152 * without a stable reference).
1153 * (2) the lru_lock must not be held.
1154 * (3) interrupts must be enabled.
1155 */
1156 int isolate_lru_page(struct page *page)
1157 {
1158 int ret = -EBUSY;
1159
1160 if (PageLRU(page)) {
1161 struct zone *zone = page_zone(page);
1162
1163 spin_lock_irq(&zone->lru_lock);
1164 if (PageLRU(page) && get_page_unless_zero(page)) {
1165 int lru = page_lru(page);
1166 ret = 0;
1167 ClearPageLRU(page);
1168
1169 del_page_from_lru_list(zone, page, lru);
1170 }
1171 spin_unlock_irq(&zone->lru_lock);
1172 }
1173 return ret;
1174 }
1175
1176 /*
1177 * Are there way too many processes in the direct reclaim path already?
1178 */
1179 static int too_many_isolated(struct zone *zone, int file,
1180 struct scan_control *sc)
1181 {
1182 unsigned long inactive, isolated;
1183
1184 if (current_is_kswapd())
1185 return 0;
1186
1187 if (!scanning_global_lru(sc))
1188 return 0;
1189
1190 if (file) {
1191 inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1192 isolated = zone_page_state(zone, NR_ISOLATED_FILE);
1193 } else {
1194 inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1195 isolated = zone_page_state(zone, NR_ISOLATED_ANON);
1196 }
1197
1198 return isolated > inactive;
1199 }
1200
1201 /*
1202 * TODO: Try merging with migrations version of putback_lru_pages
1203 */
1204 static noinline_for_stack void
1205 putback_lru_pages(struct zone *zone, struct scan_control *sc,
1206 unsigned long nr_anon, unsigned long nr_file,
1207 struct list_head *page_list)
1208 {
1209 struct page *page;
1210 struct pagevec pvec;
1211 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1212
1213 pagevec_init(&pvec, 1);
1214
1215 /*
1216 * Put back any unfreeable pages.
1217 */
1218 spin_lock(&zone->lru_lock);
1219 while (!list_empty(page_list)) {
1220 int lru;
1221 page = lru_to_page(page_list);
1222 VM_BUG_ON(PageLRU(page));
1223 list_del(&page->lru);
1224 if (unlikely(!page_evictable(page, NULL))) {
1225 spin_unlock_irq(&zone->lru_lock);
1226 putback_lru_page(page);
1227 spin_lock_irq(&zone->lru_lock);
1228 continue;
1229 }
1230 SetPageLRU(page);
1231 lru = page_lru(page);
1232 add_page_to_lru_list(zone, page, lru);
1233 if (is_active_lru(lru)) {
1234 int file = is_file_lru(lru);
1235 reclaim_stat->recent_rotated[file]++;
1236 }
1237 if (!pagevec_add(&pvec, page)) {
1238 spin_unlock_irq(&zone->lru_lock);
1239 __pagevec_release(&pvec);
1240 spin_lock_irq(&zone->lru_lock);
1241 }
1242 }
1243 __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
1244 __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
1245
1246 spin_unlock_irq(&zone->lru_lock);
1247 pagevec_release(&pvec);
1248 }
1249
1250 static noinline_for_stack void update_isolated_counts(struct zone *zone,
1251 struct scan_control *sc,
1252 unsigned long *nr_anon,
1253 unsigned long *nr_file,
1254 struct list_head *isolated_list)
1255 {
1256 unsigned long nr_active;
1257 unsigned int count[NR_LRU_LISTS] = { 0, };
1258 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1259
1260 nr_active = clear_active_flags(isolated_list, count);
1261 __count_vm_events(PGDEACTIVATE, nr_active);
1262
1263 __mod_zone_page_state(zone, NR_ACTIVE_FILE,
1264 -count[LRU_ACTIVE_FILE]);
1265 __mod_zone_page_state(zone, NR_INACTIVE_FILE,
1266 -count[LRU_INACTIVE_FILE]);
1267 __mod_zone_page_state(zone, NR_ACTIVE_ANON,
1268 -count[LRU_ACTIVE_ANON]);
1269 __mod_zone_page_state(zone, NR_INACTIVE_ANON,
1270 -count[LRU_INACTIVE_ANON]);
1271
1272 *nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
1273 *nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
1274 __mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
1275 __mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);
1276
1277 reclaim_stat->recent_scanned[0] += *nr_anon;
1278 reclaim_stat->recent_scanned[1] += *nr_file;
1279 }
1280
1281 /*
1282 * Returns true if the caller should wait to clean dirty/writeback pages.
1283 *
1284 * If we are direct reclaiming for contiguous pages and we do not reclaim
1285 * everything in the list, try again and wait for writeback IO to complete.
1286 * This will stall high-order allocations noticeably. Only do that when we
1287 * really need to free the pages under high memory pressure.
1288 */
1289 static inline bool should_reclaim_stall(unsigned long nr_taken,
1290 unsigned long nr_freed,
1291 int priority,
1292 struct scan_control *sc)
1293 {
1294 int lumpy_stall_priority;
1295
1296 /* kswapd should not stall on sync IO */
1297 if (current_is_kswapd())
1298 return false;
1299
1300 /* Only stall on lumpy reclaim */
1301 if (sc->lumpy_reclaim_mode == LUMPY_MODE_NONE)
1302 return false;
1303
1304 /* If we have reclaimed everything on the isolated list, no stall */
1305 if (nr_freed == nr_taken)
1306 return false;
1307
1308 /*
1309 * For high-order allocations, there are two stall thresholds.
1310 * High-cost allocations stall immediately, whereas lower-order
1311 * allocations such as stacks require the scanning
1312 * priority to be much higher before stalling.
1313 */
1314 if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
1315 lumpy_stall_priority = DEF_PRIORITY;
1316 else
1317 lumpy_stall_priority = DEF_PRIORITY / 3;
1318
1319 return priority <= lumpy_stall_priority;
1320 }
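/*
 * Example of the two thresholds above, assuming DEF_PRIORITY == 12 and
 * PAGE_ALLOC_COSTLY_ORDER == 3: when lumpy reclaim is active and some
 * isolated pages could not be freed, a hugepage-sized request (order 9)
 * may stall for writeback on the very first pass, while an order-2
 * allocation (e.g. a kernel stack) only stalls once the scan priority
 * has dropped to DEF_PRIORITY / 3 == 4 or below, i.e. after several
 * unsuccessful passes.
 */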
1321
1322 /*
1323 * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
1324 * of reclaimed pages
1325 */
1326 static noinline_for_stack unsigned long
1327 shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
1328 struct scan_control *sc, int priority, int file)
1329 {
1330 LIST_HEAD(page_list);
1331 unsigned long nr_scanned;
1332 unsigned long nr_reclaimed = 0;
1333 unsigned long nr_taken;
1334 unsigned long nr_anon;
1335 unsigned long nr_file;
1336
1337 while (unlikely(too_many_isolated(zone, file, sc))) {
1338 congestion_wait(BLK_RW_ASYNC, HZ/10);
1339
1340 /* We are about to die and free our memory. Return now. */
1341 if (fatal_signal_pending(current))
1342 return SWAP_CLUSTER_MAX;
1343 }
1344
1345 set_lumpy_reclaim_mode(priority, sc, false);
1346 lru_add_drain();
1347 spin_lock_irq(&zone->lru_lock);
1348
1349 if (scanning_global_lru(sc)) {
1350 nr_taken = isolate_pages_global(nr_to_scan,
1351 &page_list, &nr_scanned, sc->order,
1352 sc->lumpy_reclaim_mode == LUMPY_MODE_NONE ?
1353 ISOLATE_INACTIVE : ISOLATE_BOTH,
1354 zone, 0, file);
1355 zone->pages_scanned += nr_scanned;
1356 if (current_is_kswapd())
1357 __count_zone_vm_events(PGSCAN_KSWAPD, zone,
1358 nr_scanned);
1359 else
1360 __count_zone_vm_events(PGSCAN_DIRECT, zone,
1361 nr_scanned);
1362 } else {
1363 nr_taken = mem_cgroup_isolate_pages(nr_to_scan,
1364 &page_list, &nr_scanned, sc->order,
1365 sc->lumpy_reclaim_mode == LUMPY_MODE_NONE ?
1366 ISOLATE_INACTIVE : ISOLATE_BOTH,
1367 zone, sc->mem_cgroup,
1368 0, file);
1369 /*
1370 * mem_cgroup_isolate_pages() keeps track of
1371 * scanned pages on its own.
1372 */
1373 }
1374
1375 if (nr_taken == 0) {
1376 spin_unlock_irq(&zone->lru_lock);
1377 return 0;
1378 }
1379
1380 update_isolated_counts(zone, sc, &nr_anon, &nr_file, &page_list);
1381
1382 spin_unlock_irq(&zone->lru_lock);
1383
1384 nr_reclaimed = shrink_page_list(&page_list, sc);
1385
1386 /* Check if we should synchronously wait for writeback */
1387 if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
1388 set_lumpy_reclaim_mode(priority, sc, true);
1389 nr_reclaimed += shrink_page_list(&page_list, sc);
1390 }
1391
1392 local_irq_disable();
1393 if (current_is_kswapd())
1394 __count_vm_events(KSWAPD_STEAL, nr_reclaimed);
1395 __count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
1396
1397 putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list);
1398
1399 trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
1400 zone_idx(zone),
1401 nr_scanned, nr_reclaimed,
1402 priority,
1403 trace_shrink_flags(file, sc->lumpy_reclaim_mode));
1404 return nr_reclaimed;
1405 }
1406
1407 /*
1408 * This moves pages from the active list to the inactive list.
1409 *
1410 * We move them the other way if the page is referenced by one or more
1411 * processes, from rmap.
1412 *
1413 * If the pages are mostly unmapped, the processing is fast and it is
1414 * appropriate to hold zone->lru_lock across the whole operation. But if
1415 * the pages are mapped, the processing is slow (page_referenced()) so we
1416 * should drop zone->lru_lock around each page. It's impossible to balance
1417 * this, so instead we remove the pages from the LRU while processing them.
1418 * It is safe to rely on PG_active against the non-LRU pages in here because
1419 * nobody will play with that bit on a non-LRU page.
1420 *
1421 * The downside is that we have to touch page->_count against each page.
1422 * But we had to alter page->flags anyway.
1423 */
1424
1425 static void move_active_pages_to_lru(struct zone *zone,
1426 struct list_head *list,
1427 enum lru_list lru)
1428 {
1429 unsigned long pgmoved = 0;
1430 struct pagevec pvec;
1431 struct page *page;
1432
1433 pagevec_init(&pvec, 1);
1434
1435 while (!list_empty(list)) {
1436 page = lru_to_page(list);
1437
1438 VM_BUG_ON(PageLRU(page));
1439 SetPageLRU(page);
1440
1441 list_move(&page->lru, &zone->lru[lru].list);
1442 mem_cgroup_add_lru_list(page, lru);
1443 pgmoved++;
1444
1445 if (!pagevec_add(&pvec, page) || list_empty(list)) {
1446 spin_unlock_irq(&zone->lru_lock);
1447 if (buffer_heads_over_limit)
1448 pagevec_strip(&pvec);
1449 __pagevec_release(&pvec);
1450 spin_lock_irq(&zone->lru_lock);
1451 }
1452 }
1453 __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
1454 if (!is_active_lru(lru))
1455 __count_vm_events(PGDEACTIVATE, pgmoved);
1456 }
1457
1458 static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1459 struct scan_control *sc, int priority, int file)
1460 {
1461 unsigned long nr_taken;
1462 unsigned long pgscanned;
1463 unsigned long vm_flags;
1464 LIST_HEAD(l_hold); /* The pages which were snipped off */
1465 LIST_HEAD(l_active);
1466 LIST_HEAD(l_inactive);
1467 struct page *page;
1468 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1469 unsigned long nr_rotated = 0;
1470
1471 lru_add_drain();
1472 spin_lock_irq(&zone->lru_lock);
1473 if (scanning_global_lru(sc)) {
1474 nr_taken = isolate_pages_global(nr_pages, &l_hold,
1475 &pgscanned, sc->order,
1476 ISOLATE_ACTIVE, zone,
1477 1, file);
1478 zone->pages_scanned += pgscanned;
1479 } else {
1480 nr_taken = mem_cgroup_isolate_pages(nr_pages, &l_hold,
1481 &pgscanned, sc->order,
1482 ISOLATE_ACTIVE, zone,
1483 sc->mem_cgroup, 1, file);
1484 /*
1485 * mem_cgroup_isolate_pages() keeps track of
1486 * scanned pages on its own.
1487 */
1488 }
1489
1490 reclaim_stat->recent_scanned[file] += nr_taken;
1491
1492 __count_zone_vm_events(PGREFILL, zone, pgscanned);
1493 if (file)
1494 __mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
1495 else
1496 __mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
1497 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
1498 spin_unlock_irq(&zone->lru_lock);
1499
1500 while (!list_empty(&l_hold)) {
1501 cond_resched();
1502 page = lru_to_page(&l_hold);
1503 list_del(&page->lru);
1504
1505 if (unlikely(!page_evictable(page, NULL))) {
1506 putback_lru_page(page);
1507 continue;
1508 }
1509
1510 if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
1511 nr_rotated++;
1512 /*
1513 * Identify referenced, file-backed active pages and
1514 * give them one more trip around the active list, so
1515 * that executable code gets a better chance to stay in
1516 * memory under moderate memory pressure. Anon pages
1517 * are not likely to be evicted by use-once streaming
1518 * IO, plus JVM can create lots of anon VM_EXEC pages,
1519 * so we ignore them here.
1520 */
1521 if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
1522 list_add(&page->lru, &l_active);
1523 continue;
1524 }
1525 }
1526
1527 ClearPageActive(page); /* we are de-activating */
1528 list_add(&page->lru, &l_inactive);
1529 }
1530
1531 /*
1532 * Move pages back to the lru list.
1533 */
1534 spin_lock_irq(&zone->lru_lock);
1535 /*
1536 * Count referenced pages from currently used mappings as rotated,
1537 * even though only some of them are actually re-activated. This
1538 * helps balance scan pressure between file and anonymous pages in
1539 * get_scan_ratio.
1540 */
1541 reclaim_stat->recent_rotated[file] += nr_rotated;
1542
1543 move_active_pages_to_lru(zone, &l_active,
1544 LRU_ACTIVE + file * LRU_FILE);
1545 move_active_pages_to_lru(zone, &l_inactive,
1546 LRU_BASE + file * LRU_FILE);
1547 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
1548 spin_unlock_irq(&zone->lru_lock);
1549 }
1550
1551 #ifdef CONFIG_SWAP
1552 static int inactive_anon_is_low_global(struct zone *zone)
1553 {
1554 unsigned long active, inactive;
1555
1556 active = zone_page_state(zone, NR_ACTIVE_ANON);
1557 inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1558
1559 if (inactive * zone->inactive_ratio < active)
1560 return 1;
1561
1562 return 0;
1563 }
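/*
 * Example with illustrative numbers: if zone->inactive_ratio is 3 and the
 * zone holds 600 active but only 150 inactive anon pages, then
 * 150 * 3 < 600 and the zone is considered low on inactive anon pages, so
 * shrink_zone() will deactivate some active anon pages even when it is not
 * otherwise scanning the anon lists.
 */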
1564
1565 /**
1566 * inactive_anon_is_low - check if anonymous pages need to be deactivated
1567 * @zone: zone to check
1568 * @sc: scan control of this context
1569 *
1570 * Returns true if the zone does not have enough inactive anon pages,
1571 * meaning some active anon pages need to be deactivated.
1572 */
1573 static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
1574 {
1575 int low;
1576
1577 /*
1578 * If we don't have swap space, anonymous page deactivation
1579 * is pointless.
1580 */
1581 if (!total_swap_pages)
1582 return 0;
1583
1584 if (scanning_global_lru(sc))
1585 low = inactive_anon_is_low_global(zone);
1586 else
1587 low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup);
1588 return low;
1589 }
1590 #else
1591 static inline int inactive_anon_is_low(struct zone *zone,
1592 struct scan_control *sc)
1593 {
1594 return 0;
1595 }
1596 #endif
1597
1598 static int inactive_file_is_low_global(struct zone *zone)
1599 {
1600 unsigned long active, inactive;
1601
1602 active = zone_page_state(zone, NR_ACTIVE_FILE);
1603 inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1604
1605 return (active > inactive);
1606 }
1607
1608 /**
1609 * inactive_file_is_low - check if file pages need to be deactivated
1610 * @zone: zone to check
1611 * @sc: scan control of this context
1612 *
1613 * When the system is doing streaming IO, memory pressure here
1614 * ensures that active file pages get deactivated, until more
1615 * than half of the file pages are on the inactive list.
1616 *
1617 * Once we get to that situation, protect the system's working
1618 * set from being evicted by disabling active file page aging.
1619 *
1620 * This uses a different ratio than the anonymous pages, because
1621 * the page cache uses a use-once replacement algorithm.
1622 */
1623 static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
1624 {
1625 int low;
1626
1627 if (scanning_global_lru(sc))
1628 low = inactive_file_is_low_global(zone);
1629 else
1630 low = mem_cgroup_inactive_file_is_low(sc->mem_cgroup);
1631 return low;
1632 }
1633
1634 static int inactive_list_is_low(struct zone *zone, struct scan_control *sc,
1635 int file)
1636 {
1637 if (file)
1638 return inactive_file_is_low(zone, sc);
1639 else
1640 return inactive_anon_is_low(zone, sc);
1641 }
1642
1643 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1644 struct zone *zone, struct scan_control *sc, int priority)
1645 {
1646 int file = is_file_lru(lru);
1647
1648 if (is_active_lru(lru)) {
1649 if (inactive_list_is_low(zone, sc, file))
1650 shrink_active_list(nr_to_scan, zone, sc, priority, file);
1651 return 0;
1652 }
1653
1654 return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
1655 }
1656
1657 /*
1658 * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
1659 * until we have collected SWAP_CLUSTER_MAX pages to scan.
1660 */
1661 static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
1662 unsigned long *nr_saved_scan)
1663 {
1664 unsigned long nr;
1665
1666 *nr_saved_scan += nr_to_scan;
1667 nr = *nr_saved_scan;
1668
1669 if (nr >= SWAP_CLUSTER_MAX)
1670 *nr_saved_scan = 0;
1671 else
1672 nr = 0;
1673
1674 return nr;
1675 }
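/*
 * Example of the batching above, assuming SWAP_CLUSTER_MAX == 32: with
 * nr_to_scan == 12 per call, the first two calls park 12 and then 24 pages
 * in *nr_saved_scan and return 0; the third call reaches 36, which is
 * >= 32, so it resets the saved count and returns 36.
 */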
1676
1677 /*
1678 * Determine how aggressively the anon and file LRU lists should be
1679 * scanned. The relative value of each set of LRU lists is determined
1680 * by looking at the fraction of the scanned pages that we rotated
1681 * back onto the active list instead of evicting.
1682 *
1683 * nr[0] = anon pages to scan; nr[1] = file pages to scan
1684 */
1685 static void get_scan_count(struct zone *zone, struct scan_control *sc,
1686 unsigned long *nr, int priority)
1687 {
1688 unsigned long anon, file, free;
1689 unsigned long anon_prio, file_prio;
1690 unsigned long ap, fp;
1691 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1692 u64 fraction[2], denominator;
1693 enum lru_list l;
1694 int noswap = 0;
1695
1696 /* If we have no swap space, do not bother scanning anon pages. */
1697 if (!sc->may_swap || (nr_swap_pages <= 0)) {
1698 noswap = 1;
1699 fraction[0] = 0;
1700 fraction[1] = 1;
1701 denominator = 1;
1702 goto out;
1703 }
1704
1705 anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
1706 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
1707 file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
1708 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
1709
1710 if (scanning_global_lru(sc)) {
1711 free = zone_page_state(zone, NR_FREE_PAGES);
1712 /* If we have very few page cache pages,
1713 force-scan anon pages. */
1714 if (unlikely(file + free <= high_wmark_pages(zone))) {
1715 fraction[0] = 1;
1716 fraction[1] = 0;
1717 denominator = 1;
1718 goto out;
1719 }
1720 }
1721
1722 /*
1723 * With swappiness at 100, anonymous and file have the same priority.
1724 * This scanning priority is essentially the inverse of IO cost.
1725 */
1726 anon_prio = sc->swappiness;
1727 file_prio = 200 - sc->swappiness;
1728
1729 /*
1730 * OK, so we have swap space and a fair amount of page cache
1731 * pages. We use the recently rotated / recently scanned
1732 * ratios to determine how valuable each cache is.
1733 *
1734 * Because workloads change over time (and to avoid overflow)
1735 * we keep these statistics as a floating average, which ends
1736 * up weighing recent references more than old ones.
1737 *
1738 * anon in [0], file in [1]
1739 */
1740 spin_lock_irq(&zone->lru_lock);
1741 if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
1742 reclaim_stat->recent_scanned[0] /= 2;
1743 reclaim_stat->recent_rotated[0] /= 2;
1744 }
1745
1746 if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
1747 reclaim_stat->recent_scanned[1] /= 2;
1748 reclaim_stat->recent_rotated[1] /= 2;
1749 }
1750
1751 /*
1752 * The amount of pressure on anon vs file pages is inversely
1753 * proportional to the fraction of recently scanned pages on
1754 * each list that were recently referenced and in active use.
1755 */
1756 ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
1757 ap /= reclaim_stat->recent_rotated[0] + 1;
1758
1759 fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
1760 fp /= reclaim_stat->recent_rotated[1] + 1;
1761 spin_unlock_irq(&zone->lru_lock);
1762
1763 fraction[0] = ap;
1764 fraction[1] = fp;
1765 denominator = ap + fp + 1;
1766 out:
1767 for_each_evictable_lru(l) {
1768 int file = is_file_lru(l);
1769 unsigned long scan;
1770
1771 scan = zone_nr_lru_pages(zone, sc, l);
1772 if (priority || noswap) {
1773 scan >>= priority;
1774 scan = div64_u64(scan * fraction[file], denominator);
1775 }
1776 nr[l] = nr_scan_try_batch(scan,
1777 &reclaim_stat->nr_saved_scan[l]);
1778 }
1779 }
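/*
 * Worked example of the anon/file balance above, with illustrative numbers:
 * at swappiness == 60, anon_prio = 60 and file_prio = 140.  If
 * recent_scanned/recent_rotated are 1000/900 for anon and 1000/100 for
 * file, then
 *
 *	ap = 61 * 1001 / 901  ~=  67
 *	fp = 141 * 1001 / 101 ~= 1397
 *
 * so file scanning proceeds at roughly fp / (ap + fp + 1) ~= 95% of its
 * base rate while anon scanning is cut to about 5%, reflecting that the
 * anon pages were mostly re-referenced and the file pages were not.
 */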
1780
1781 /*
1782 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
1783 */
1784 static void shrink_zone(int priority, struct zone *zone,
1785 struct scan_control *sc)
1786 {
1787 unsigned long nr[NR_LRU_LISTS];
1788 unsigned long nr_to_scan;
1789 enum lru_list l;
1790 unsigned long nr_reclaimed = sc->nr_reclaimed;
1791 unsigned long nr_to_reclaim = sc->nr_to_reclaim;
1792
1793 get_scan_count(zone, sc, nr, priority);
1794
1795 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
1796 nr[LRU_INACTIVE_FILE]) {
1797 for_each_evictable_lru(l) {
1798 if (nr[l]) {
1799 nr_to_scan = min_t(unsigned long,
1800 nr[l], SWAP_CLUSTER_MAX);
1801 nr[l] -= nr_to_scan;
1802
1803 nr_reclaimed += shrink_list(l, nr_to_scan,
1804 zone, sc, priority);
1805 }
1806 }
1807 /*
1808 * On large memory systems, scan >> priority can become
1809 * really large. This is fine for the starting priority;
1810 * we want to put equal scanning pressure on each zone.
1811 * However, if the VM has a harder time of freeing pages,
1812 * with multiple processes reclaiming pages, the total
1813 * freeing target can get unreasonably large.
1814 */
1815 if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
1816 break;
1817 }
1818
1819 sc->nr_reclaimed = nr_reclaimed;
1820
1821 /*
1822 * Even if we did not try to evict anon pages at all, we want to
1823 * rebalance the anon lru active/inactive ratio.
1824 */
1825 if (inactive_anon_is_low(zone, sc))
1826 shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
1827
1828 throttle_vm_writeout(sc->gfp_mask);
1829 }
1830
1831 /*
1832 * This is the direct reclaim path, for page-allocating processes. We only
1833 * try to reclaim pages from zones which will satisfy the caller's allocation
1834 * request.
1835 *
1836 * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
1837 * Because:
1838 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
1839 * allocation or
1840 * b) The target zone may be at high_wmark_pages(zone) but the lower zones
1841 * must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
1842 * zone defense algorithm.
1843 *
1844 * If a zone is deemed to be full of pinned pages then just give it a light
1845 * scan then give up on it.
1846 */
1847 static void shrink_zones(int priority, struct zonelist *zonelist,
1848 struct scan_control *sc)
1849 {
1850 struct zoneref *z;
1851 struct zone *zone;
1852
1853 for_each_zone_zonelist_nodemask(zone, z, zonelist,
1854 gfp_zone(sc->gfp_mask), sc->nodemask) {
1855 if (!populated_zone(zone))
1856 continue;
1857 /*
1858 * Take care that memory controller reclaim has little
1859 * influence on the global LRU.
1860 */
1861 if (scanning_global_lru(sc)) {
1862 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1863 continue;
1864 if (zone->all_unreclaimable && priority != DEF_PRIORITY)
1865 continue; /* Let kswapd poll it */
1866 }
1867
1868 shrink_zone(priority, zone, sc);
1869 }
1870 }
1871
1872 static bool zone_reclaimable(struct zone *zone)
1873 {
1874 return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
1875 }
1876
1877 /*
1878 * While hibernation is in progress, kswapd is frozen, so it cannot mark
1879 * a zone all_unreclaimable and it cannot handle OOM during hibernation.
1880 * So check whether zones are unreclaimable in direct reclaim as well as kswapd.
1881 */
1882 static bool all_unreclaimable(struct zonelist *zonelist,
1883 struct scan_control *sc)
1884 {
1885 struct zoneref *z;
1886 struct zone *zone;
1887 bool all_unreclaimable = true;
1888
1889 for_each_zone_zonelist_nodemask(zone, z, zonelist,
1890 gfp_zone(sc->gfp_mask), sc->nodemask) {
1891 if (!populated_zone(zone))
1892 continue;
1893 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1894 continue;
1895 if (zone_reclaimable(zone)) {
1896 all_unreclaimable = false;
1897 break;
1898 }
1899 }
1900
1901 return all_unreclaimable;
1902 }
1903
1904 /*
1905 * This is the main entry point to direct page reclaim.
1906 *
1907 * If a full scan of the inactive list fails to free enough memory then we
1908 * are "out of memory" and something needs to be killed.
1909 *
1910 * If the caller is !__GFP_FS then the probability of a failure is reasonably
1911 * high - the zone may be full of dirty or under-writeback pages, which this
1912 * caller can't do much about. We kick the writeback threads and take explicit
1913 * naps in the hope that some of these pages can be written. But if the
1914 * allocating task holds filesystem locks which prevent writeout this might not
1915 * work, and the allocation attempt will fail.
1916 *
1917 * returns: 0, if no pages reclaimed
1918 * else, the number of pages reclaimed
1919 */
1920 static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
1921 struct scan_control *sc)
1922 {
1923 int priority;
1924 unsigned long total_scanned = 0;
1925 struct reclaim_state *reclaim_state = current->reclaim_state;
1926 struct zoneref *z;
1927 struct zone *zone;
1928 unsigned long writeback_threshold;
1929
1930 get_mems_allowed();
1931 delayacct_freepages_start();
1932
1933 if (scanning_global_lru(sc))
1934 count_vm_event(ALLOCSTALL);
1935
1936 for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1937 sc->nr_scanned = 0;
1938 if (!priority)
1939 disable_swap_token();
1940 shrink_zones(priority, zonelist, sc);
1941 /*
1942 * Don't shrink slabs when reclaiming memory from
1943 * over limit cgroups
1944 */
1945 if (scanning_global_lru(sc)) {
1946 unsigned long lru_pages = 0;
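/*
 * shrink_slab() balances slab pressure against the LRU scan pressure,
 * so sum up the reclaimable LRU pages of every eligible zone first.
 */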
1947 for_each_zone_zonelist(zone, z, zonelist,
1948 gfp_zone(sc->gfp_mask)) {
1949 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1950 continue;
1951
1952 lru_pages += zone_reclaimable_pages(zone);
1953 }
1954
1955 shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
1956 if (reclaim_state) {
1957 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
1958 reclaim_state->reclaimed_slab = 0;
1959 }
1960 }
1961 total_scanned += sc->nr_scanned;
1962 if (sc->nr_reclaimed >= sc->nr_to_reclaim)
1963 goto out;
1964
1965 /*
1966 * Try to write back as many pages as we just scanned. This
1967 * tends to cause slow streaming writers to write data to the
1968 * disk smoothly, at the dirtying rate, which is nice. But
1969 * that's undesirable in laptop mode, where we *want* lumpy
1970 * writeout. So in laptop mode, write out the whole world.
1971 */
1972 writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
1973 if (total_scanned > writeback_threshold) {
1974 wakeup_flusher_threads(laptop_mode ? 0 : total_scanned);
1975 sc->may_writepage = 1;
1976 }
1977
1978 /* Take a nap, wait for some writeback to complete */
1979 if (!sc->hibernation_mode && sc->nr_scanned &&
1980 priority < DEF_PRIORITY - 2)
1981 congestion_wait(BLK_RW_ASYNC, HZ/10);
1982 }
1983
1984 out:
1985 delayacct_freepages_end();
1986 put_mems_allowed();
1987
1988 if (sc->nr_reclaimed)
1989 return sc->nr_reclaimed;
1990
1991 /* top priority shrink_zones still had more to do? don't OOM, then */
1992 if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
1993 return 1;
1994
1995 return 0;
1996 }
1997
1998 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
1999 gfp_t gfp_mask, nodemask_t *nodemask)
2000 {
2001 unsigned long nr_reclaimed;
2002 struct scan_control sc = {
2003 .gfp_mask = gfp_mask,
2004 .may_writepage = !laptop_mode,
2005 .nr_to_reclaim = SWAP_CLUSTER_MAX,
2006 .may_unmap = 1,
2007 .may_swap = 1,
2008 .swappiness = vm_swappiness,
2009 .order = order,
2010 .mem_cgroup = NULL,
2011 .nodemask = nodemask,
2012 };
2013
2014 trace_mm_vmscan_direct_reclaim_begin(order,
2015 sc.may_writepage,
2016 gfp_mask);
2017
2018 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
2019
2020 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
2021
2022 return nr_reclaimed;
2023 }
2024
2025 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
2026
2027 unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
2028 gfp_t gfp_mask, bool noswap,
2029 unsigned int swappiness,
2030 struct zone *zone)
2031 {
2032 struct scan_control sc = {
2033 .nr_to_reclaim = SWAP_CLUSTER_MAX,
2034 .may_writepage = !laptop_mode,
2035 .may_unmap = 1,
2036 .may_swap = !noswap,
2037 .swappiness = swappiness,
2038 .order = 0,
2039 .mem_cgroup = mem,
2040 };
2041 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2042 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
2043
2044 trace_mm_vmscan_memcg_softlimit_reclaim_begin(0,
2045 sc.may_writepage,
2046 sc.gfp_mask);
2047
2048 /*
2049 * NOTE: Although we could pass in the caller's priority, using it
2050 * here is not a good idea, since it limits the pages we can scan.
2051 * If we don't reclaim here, the shrink_zone from balance_pgdat
2052 * will pick up pages from other mem cgroups as well. So we hack
2053 * the priority and force it to zero.
2054 */
2055 shrink_zone(0, zone, &sc);
2056
2057 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
2058
2059 return sc.nr_reclaimed;
2060 }
2061
2062 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
2063 gfp_t gfp_mask,
2064 bool noswap,
2065 unsigned int swappiness)
2066 {
2067 struct zonelist *zonelist;
2068 unsigned long nr_reclaimed;
2069 struct scan_control sc = {
2070 .may_writepage = !laptop_mode,
2071 .may_unmap = 1,
2072 .may_swap = !noswap,
2073 .nr_to_reclaim = SWAP_CLUSTER_MAX,
2074 .swappiness = swappiness,
2075 .order = 0,
2076 .mem_cgroup = mem_cont,
2077 .nodemask = NULL, /* we don't care about placement */
2078 };
2079
2080 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2081 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
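/*
 * Memcg reclaim is not bound to a particular allocation context, so
 * simply scan the current node's zonelist with no nodemask restriction.
 */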
2082 zonelist = NODE_DATA(numa_node_id())->node_zonelists;
2083
2084 trace_mm_vmscan_memcg_reclaim_begin(0,
2085 sc.may_writepage,
2086 sc.gfp_mask);
2087
2088 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
2089
2090 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
2091
2092 return nr_reclaimed;
2093 }
2094 #endif
2095
2096 /* is kswapd sleeping prematurely? */
2097 static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
2098 {
2099 int i;
2100
2101 /* If a direct reclaimer woke kswapd within HZ/10, it's premature */
2102 if (remaining)
2103 return 1;
2104
2105 /* If after HZ/10, a zone is below the high mark, it's premature */
2106 for (i = 0; i < pgdat->nr_zones; i++) {
2107 struct zone *zone = pgdat->node_zones + i;
2108
2109 if (!populated_zone(zone))
2110 continue;
2111
2112 if (zone->all_unreclaimable)
2113 continue;
2114
2115 if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
2116 0, 0))
2117 return 1;
2118 }
2119
2120 return 0;
2121 }
2122
2123 /*
2124 * For kswapd, balance_pgdat() will work across all this node's zones until
2125 * they are all at high_wmark_pages(zone).
2126 *
2127 * Returns the number of pages which were actually freed.
2128 *
2129 * There is special handling here for zones which are full of pinned pages.
2130 * This can happen if the pages are all mlocked, or if they are all used by
2131 * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb.
2132 * What we do is to detect the case where all pages in the zone have been
2133 * scanned twice and there has been zero successful reclaim. Mark the zone as
2134 * dead and from now on, only perform a short scan. Basically we're polling
2135 * the zone for when the problem goes away.
2136 *
2137 * kswapd scans the zones in the highmem->normal->dma direction. It skips
2138 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
2139 * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
2140 * lower zones regardless of the number of free pages in the lower zones. This
2141 * interoperates with the page allocator fallback scheme to ensure that aging
2142 * of pages is balanced across the zones.
2143 */
2144 static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
2145 {
2146 int all_zones_ok;
2147 int priority;
2148 int i;
2149 unsigned long total_scanned;
2150 struct reclaim_state *reclaim_state = current->reclaim_state;
2151 struct scan_control sc = {
2152 .gfp_mask = GFP_KERNEL,
2153 .may_unmap = 1,
2154 .may_swap = 1,
2155 /*
2156 * kswapd should not bail out of reclaim early, because we want
2157 * to put equal scanning pressure on each zone.
2158 */
2159 .nr_to_reclaim = ULONG_MAX,
2160 .swappiness = vm_swappiness,
2161 .order = order,
2162 .mem_cgroup = NULL,
2163 };
2164 loop_again:
2165 total_scanned = 0;
2166 sc.nr_reclaimed = 0;
2167 sc.may_writepage = !laptop_mode;
2168 count_vm_event(PAGEOUTRUN);
2169
2170 for (priority = DEF_PRIORITY; priority >= 0; priority--) {
2171 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
2172 unsigned long lru_pages = 0;
2173 int has_under_min_watermark_zone = 0;
2174
2175 /* The swap token gets in the way of swapout... */
2176 if (!priority)
2177 disable_swap_token();
2178
2179 all_zones_ok = 1;
2180
2181 /*
2182 * Scan in the highmem->dma direction for the highest
2183 * zone which needs scanning
2184 */
2185 for (i = pgdat->nr_zones - 1; i >= 0; i--) {
2186 struct zone *zone = pgdat->node_zones + i;
2187
2188 if (!populated_zone(zone))
2189 continue;
2190
2191 if (zone->all_unreclaimable && priority != DEF_PRIORITY)
2192 continue;
2193
2194 /*
2195 * Do some background aging of the anon list, to give
2196 * pages a chance to be referenced before reclaiming.
2197 */
2198 if (inactive_anon_is_low(zone, &sc))
2199 shrink_active_list(SWAP_CLUSTER_MAX, zone,
2200 &sc, priority, 0);
2201
2202 if (!zone_watermark_ok(zone, order,
2203 high_wmark_pages(zone), 0, 0)) {
2204 end_zone = i;
2205 break;
2206 }
2207 }
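/* i < 0 means no zone needed scanning; the node is balanced */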
2208 if (i < 0)
2209 goto out;
2210
2211 for (i = 0; i <= end_zone; i++) {
2212 struct zone *zone = pgdat->node_zones + i;
2213
2214 lru_pages += zone_reclaimable_pages(zone);
2215 }
2216
2217 /*
2218 * Now scan the zone in the dma->highmem direction, stopping
2219 * at the last zone which needs scanning.
2220 *
2221 * We do this because the page allocator works in the opposite
2222 * direction. This prevents the page allocator from allocating
2223 * pages behind kswapd's direction of progress, which would
2224 * cause too much scanning of the lower zones.
2225 */
2226 for (i = 0; i <= end_zone; i++) {
2227 struct zone *zone = pgdat->node_zones + i;
2228 int nr_slab;
2229
2230 if (!populated_zone(zone))
2231 continue;
2232
2233 if (zone->all_unreclaimable && priority != DEF_PRIORITY)
2234 continue;
2235
2236 sc.nr_scanned = 0;
2237
2238 /*
2239 * Call soft limit reclaim before calling shrink_zone.
2240 * For now we ignore the return value
2241 */
2242 mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask);
2243
2244 /*
2245 * We put equal pressure on every zone, unless one
2246 * zone has way too many pages free already.
2247 */
2248 if (!zone_watermark_ok(zone, order,
2249 8*high_wmark_pages(zone), end_zone, 0))
2250 shrink_zone(priority, zone, &sc);
2251 reclaim_state->reclaimed_slab = 0;
2252 nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
2253 lru_pages);
2254 sc.nr_reclaimed += reclaim_state->reclaimed_slab;
2255 total_scanned += sc.nr_scanned;
2256 if (zone->all_unreclaimable)
2257 continue;
2258 if (nr_slab == 0 && !zone_reclaimable(zone))
2259 zone->all_unreclaimable = 1;
2260 /*
2261 * If we've done a decent amount of scanning and
2262 * the reclaim ratio is low, start doing writepage
2263 * even in laptop mode
2264 */
2265 if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
2266 total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
2267 sc.may_writepage = 1;
2268
2269 if (!zone_watermark_ok(zone, order,
2270 high_wmark_pages(zone), end_zone, 0)) {
2271 all_zones_ok = 0;
2272 /*
2273 * We are still under the min watermark. This
2274 * means that we have a GFP_ATOMIC allocation
2275 * failure risk. Hurry up!
2276 */
2277 if (!zone_watermark_ok(zone, order,
2278 min_wmark_pages(zone), end_zone, 0))
2279 has_under_min_watermark_zone = 1;
2280 }
2281
2282 }
2283 if (all_zones_ok)
2284 break; /* kswapd: all done */
2285 /*
2286 * OK, kswapd is getting into trouble. Take a nap, then take
2287 * another pass across the zones.
2288 */
2289 if (total_scanned && (priority < DEF_PRIORITY - 2)) {
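/* Skip the wait if any zone is already below its min watermark */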
2290 if (has_under_min_watermark_zone)
2291 count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
2292 else
2293 congestion_wait(BLK_RW_ASYNC, HZ/10);
2294 }
2295
2296 /*
2297 * We do this so kswapd doesn't build up large priorities for
2298 * example when it is freeing in parallel with allocators. It
2299 * matches the direct reclaim path behaviour in terms of impact
2300 * on zone->*_priority.
2301 */
2302 if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
2303 break;
2304 }
2305 out:
2306 if (!all_zones_ok) {
2307 cond_resched();
2308
2309 try_to_freeze();
2310
2311 /*
2312 * Fragmentation may mean that the system cannot be
2313 * rebalanced for high-order allocations in all zones.
2314 * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX,
2315 * it means the zones have been fully scanned and are still
2316 * not balanced. For high-order allocations, there is
2317 * little point trying all over again, as kswapd may loop
2318 * forever.
2319 *
2320 * Instead, recheck all watermarks at order-0 as they
2321 * are the most important. If watermarks are ok, kswapd will go
2322 * back to sleep. High-order users can still perform direct
2323 * reclaim if they wish.
2324 */
2325 if (sc.nr_reclaimed < SWAP_CLUSTER_MAX)
2326 order = sc.order = 0;
2327
2328 goto loop_again;
2329 }
2330
2331 return sc.nr_reclaimed;
2332 }
2333
2334 /*
2335 * The background pageout daemon, started as a kernel thread
2336 * from the init process.
2337 *
2338 * This basically trickles out pages so that we have _some_
2339 * free memory available even if there is no other activity
2340 * that frees anything up. This is needed for things like routing
2341 * etc, where we otherwise might have all activity going on in
2342 * asynchronous contexts that cannot page things out.
2343 *
2344 * If there are applications that are active memory-allocators
2345 * (most normal use), this basically shouldn't matter.
2346 */
2347 static int kswapd(void *p)
2348 {
2349 unsigned long order;
2350 pg_data_t *pgdat = (pg_data_t*)p;
2351 struct task_struct *tsk = current;
2352 DEFINE_WAIT(wait);
2353 struct reclaim_state reclaim_state = {
2354 .reclaimed_slab = 0,
2355 };
2356 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2357
2358 lockdep_set_current_reclaim_state(GFP_KERNEL);
2359
2360 if (!cpumask_empty(cpumask))
2361 set_cpus_allowed_ptr(tsk, cpumask);
2362 current->reclaim_state = &reclaim_state;
2363
2364 /*
2365 * Tell the memory management that we're a "memory allocator",
2366 * and that if we need more memory we should get access to it
2367 * regardless (see "__alloc_pages()"). "kswapd" should
2368 * never get caught in the normal page freeing logic.
2369 *
2370 * (Kswapd normally doesn't need memory anyway, but sometimes
2371 * you need a small amount of memory in order to be able to
2372 * page out something else, and this flag essentially protects
2373 * us from recursively trying to free more memory as we're
2374 * trying to free the first piece of memory in the first place).
2375 */
2376 tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
2377 set_freezable();
2378
2379 order = 0;
2380 for ( ; ; ) {
2381 unsigned long new_order;
2382 int ret;
2383
2384 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2385 new_order = pgdat->kswapd_max_order;
2386 pgdat->kswapd_max_order = 0;
2387 if (order < new_order) {
2388 /*
2389 * Don't sleep if someone wants a larger 'order'
2390 * allocation
2391 */
2392 order = new_order;
2393 } else {
2394 if (!freezing(current) && !kthread_should_stop()) {
2395 long remaining = 0;
2396
2397 /* Try to sleep for a short interval */
2398 if (!sleeping_prematurely(pgdat, order, remaining)) {
2399 remaining = schedule_timeout(HZ/10);
2400 finish_wait(&pgdat->kswapd_wait, &wait);
2401 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2402 }
2403
2404 /*
2405 * After a short sleep, check if it was a
2406 * premature sleep. If not, then go fully
2407 * to sleep until explicitly woken up
2408 */
2409 if (!sleeping_prematurely(pgdat, order, remaining)) {
2410 trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
2411 schedule();
2412 } else {
2413 if (remaining)
2414 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
2415 else
2416 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
2417 }
2418 }
2419
2420 order = pgdat->kswapd_max_order;
2421 }
2422 finish_wait(&pgdat->kswapd_wait, &wait);
2423
2424 ret = try_to_freeze();
2425 if (kthread_should_stop())
2426 break;
2427
2428 /*
2429 * We can speed up thawing tasks if we don't call balance_pgdat
2430 * after returning from the refrigerator
2431 */
2432 if (!ret) {
2433 trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
2434 balance_pgdat(pgdat, order);
2435 }
2436 }
2437 return 0;
2438 }
2439
2440 /*
2441 * A zone is low on free memory, so wake its kswapd task to service it.
2442 */
2443 void wakeup_kswapd(struct zone *zone, int order)
2444 {
2445 pg_data_t *pgdat;
2446
2447 if (!populated_zone(zone))
2448 return;
2449
2450 pgdat = zone->zone_pgdat;
2451 if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
2452 return;
2453 if (pgdat->kswapd_max_order < order)
2454 pgdat->kswapd_max_order = order;
2455 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
2456 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2457 return;
2458 if (!waitqueue_active(&pgdat->kswapd_wait))
2459 return;
2460 wake_up_interruptible(&pgdat->kswapd_wait);
2461 }
2462
2463 /*
2464 * The reclaimable count is mostly accurate. The harder-to-reclaim
2465 * pages are
2466 * - mlocked pages, which will be moved to the unevictable list when encountered
2467 * - mapped pages, which may require several passes to be reclaimed
2468 * - dirty pages, which are not "instantly" reclaimable
2469 */
2470 unsigned long global_reclaimable_pages(void)
2471 {
2472 int nr;
2473
2474 nr = global_page_state(NR_ACTIVE_FILE) +
2475 global_page_state(NR_INACTIVE_FILE);
2476
2477 if (nr_swap_pages > 0)
2478 nr += global_page_state(NR_ACTIVE_ANON) +
2479 global_page_state(NR_INACTIVE_ANON);
2480
2481 return nr;
2482 }
2483
2484 unsigned long zone_reclaimable_pages(struct zone *zone)
2485 {
2486 int nr;
2487
2488 nr = zone_page_state(zone, NR_ACTIVE_FILE) +
2489 zone_page_state(zone, NR_INACTIVE_FILE);
2490
2491 if (nr_swap_pages > 0)
2492 nr += zone_page_state(zone, NR_ACTIVE_ANON) +
2493 zone_page_state(zone, NR_INACTIVE_ANON);
2494
2495 return nr;
2496 }
2497
2498 #ifdef CONFIG_HIBERNATION
2499 /*
2500 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
2501 * freed pages.
2502 *
2503 * Rather than trying to age LRUs, the aim is to preserve the overall
2504 * LRU order by reclaiming preferentially
2505 * inactive > active > active referenced > active mapped
2506 */
2507 unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
2508 {
2509 struct reclaim_state reclaim_state;
2510 struct scan_control sc = {
2511 .gfp_mask = GFP_HIGHUSER_MOVABLE,
2512 .may_swap = 1,
2513 .may_unmap = 1,
2514 .may_writepage = 1,
2515 .nr_to_reclaim = nr_to_reclaim,
2516 .hibernation_mode = 1,
2517 .swappiness = vm_swappiness,
2518 .order = 0,
2519 };
2520 struct zonelist * zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
2521 struct task_struct *p = current;
2522 unsigned long nr_reclaimed;
2523
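/*
 * PF_MEMALLOC lets this task dip into reserves and keeps reclaim from
 * recursing into itself while we free memory for hibernation.
 */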
2524 p->flags |= PF_MEMALLOC;
2525 lockdep_set_current_reclaim_state(sc.gfp_mask);
2526 reclaim_state.reclaimed_slab = 0;
2527 p->reclaim_state = &reclaim_state;
2528
2529 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
2530
2531 p->reclaim_state = NULL;
2532 lockdep_clear_current_reclaim_state();
2533 p->flags &= ~PF_MEMALLOC;
2534
2535 return nr_reclaimed;
2536 }
2537 #endif /* CONFIG_HIBERNATION */
2538
2539 /* It's optimal to keep kswapds on the same CPUs as their memory, but
2540 not required for correctness. So if the last cpu in a node goes
2541 away, we get changed to run anywhere: as the first one comes back,
2542 restore their cpu bindings. */
2543 static int __devinit cpu_callback(struct notifier_block *nfb,
2544 unsigned long action, void *hcpu)
2545 {
2546 int nid;
2547
2548 if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
2549 for_each_node_state(nid, N_HIGH_MEMORY) {
2550 pg_data_t *pgdat = NODE_DATA(nid);
2551 const struct cpumask *mask;
2552
2553 mask = cpumask_of_node(pgdat->node_id);
2554
2555 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
2556 /* One of our CPUs online: restore mask */
2557 set_cpus_allowed_ptr(pgdat->kswapd, mask);
2558 }
2559 }
2560 return NOTIFY_OK;
2561 }
2562
2563 /*
2564 * This kswapd start function will be called by init and node-hot-add.
2565 * On node hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
2566 */
2567 int kswapd_run(int nid)
2568 {
2569 pg_data_t *pgdat = NODE_DATA(nid);
2570 int ret = 0;
2571
2572 if (pgdat->kswapd)
2573 return 0;
2574
2575 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
2576 if (IS_ERR(pgdat->kswapd)) {
2577 /* failure at boot is fatal */
2578 BUG_ON(system_state == SYSTEM_BOOTING);
2579 printk("Failed to start kswapd on node %d\n", nid);
2580 ret = -1;
2581 }
2582 return ret;
2583 }
2584
2585 /*
2586 * Called by memory hotplug when all memory in a node is offlined.
2587 */
2588 void kswapd_stop(int nid)
2589 {
2590 struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
2591
2592 if (kswapd)
2593 kthread_stop(kswapd);
2594 }
2595
2596 static int __init kswapd_init(void)
2597 {
2598 int nid;
2599
2600 swap_setup();
2601 for_each_node_state(nid, N_HIGH_MEMORY)
2602 kswapd_run(nid);
2603 hotcpu_notifier(cpu_callback, 0);
2604 return 0;
2605 }
2606
2607 module_init(kswapd_init)
2608
2609 #ifdef CONFIG_NUMA
2610 /*
2611 * Zone reclaim mode
2612 *
2613 * If non-zero call zone_reclaim when the number of free pages falls below
2614 * the watermarks.
2615 */
2616 int zone_reclaim_mode __read_mostly;
2617
2618 #define RECLAIM_OFF 0
2619 #define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */
2620 #define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
2621 #define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */
2622
2623 /*
2624 * Priority for ZONE_RECLAIM. This determines the fraction of pages
2625 * of a node considered for each zone_reclaim. 4 scans 1/16th of
2626 * a zone.
2627 */
2628 #define ZONE_RECLAIM_PRIORITY 4
2629
2630 /*
2631 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
2632 * occur.
2633 */
2634 int sysctl_min_unmapped_ratio = 1;
2635
2636 /*
2637 * If the number of slab pages in a zone grows beyond this percentage then
2638 * slab reclaim needs to occur.
2639 */
2640 int sysctl_min_slab_ratio = 5;
2641
2642 static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
2643 {
2644 unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
2645 unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
2646 zone_page_state(zone, NR_ACTIVE_FILE);
2647
2648 /*
2649 * It's possible for there to be more file mapped pages than
2650 * accounted for by the pages on the file LRU lists because
2651 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
2652 */
2653 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
2654 }
2655
2656 /* Work out how many page cache pages we can reclaim in this reclaim_mode */
2657 static long zone_pagecache_reclaimable(struct zone *zone)
2658 {
2659 long nr_pagecache_reclaimable;
2660 long delta = 0;
2661
2662 /*
2663 * If RECLAIM_SWAP is set, then all file pages are considered
2664 * potentially reclaimable. Otherwise, we have to worry about
2665 * pages like swapcache and zone_unmapped_file_pages() provides
2666 * a better estimate
2667 */
2668 if (zone_reclaim_mode & RECLAIM_SWAP)
2669 nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
2670 else
2671 nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);
2672
2673 /* If we can't clean pages, remove dirty pages from consideration */
2674 if (!(zone_reclaim_mode & RECLAIM_WRITE))
2675 delta += zone_page_state(zone, NR_FILE_DIRTY);
2676
2677 /* Watch for any possible underflows due to delta */
2678 if (unlikely(delta > nr_pagecache_reclaimable))
2679 delta = nr_pagecache_reclaimable;
2680
2681 return nr_pagecache_reclaimable - delta;
2682 }
2683
2684 /*
2685 * Try to free up some pages from this zone through reclaim.
2686 */
2687 static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
2688 {
2689 /* Minimum pages needed in order to stay on node */
2690 const unsigned long nr_pages = 1 << order;
2691 struct task_struct *p = current;
2692 struct reclaim_state reclaim_state;
2693 int priority;
2694 struct scan_control sc = {
2695 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
2696 .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
2697 .may_swap = 1,
2698 .nr_to_reclaim = max_t(unsigned long, nr_pages,
2699 SWAP_CLUSTER_MAX),
2700 .gfp_mask = gfp_mask,
2701 .swappiness = vm_swappiness,
2702 .order = order,
2703 };
2704 unsigned long nr_slab_pages0, nr_slab_pages1;
2705
2706 cond_resched();
2707 /*
2708 * We need to be able to allocate from the reserves for RECLAIM_SWAP
2709 * and we also need to be able to write out pages for RECLAIM_WRITE
2710 * and RECLAIM_SWAP.
2711 */
2712 p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
2713 lockdep_set_current_reclaim_state(gfp_mask);
2714 reclaim_state.reclaimed_slab = 0;
2715 p->reclaim_state = &reclaim_state;
2716
2717 if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
2718 /*
2719 * Free memory by calling shrink zone with increasing
2720 * priorities until we have enough memory freed.
2721 */
2722 priority = ZONE_RECLAIM_PRIORITY;
2723 do {
2724 shrink_zone(priority, zone, &sc);
2725 priority--;
2726 } while (priority >= 0 && sc.nr_reclaimed < nr_pages);
2727 }
2728
2729 nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
2730 if (nr_slab_pages0 > zone->min_slab_pages) {
2731 /*
2732 * shrink_slab() does not currently allow us to determine how
2733 * many pages were freed in this zone. So we take the current
2734 * number of slab pages and shake the slab until it is reduced
2735 * by the same nr_pages that we used for reclaiming unmapped
2736 * pages.
2737 *
2738 * Note that shrink_slab will free memory on all zones and may
2739 * take a long time.
2740 */
2741 for (;;) {
2742 unsigned long lru_pages = zone_reclaimable_pages(zone);
2743
2744 /* No reclaimable slab or very low memory pressure */
2745 if (!shrink_slab(sc.nr_scanned, gfp_mask, lru_pages))
2746 break;
2747
2748 /* Freed enough memory */
2749 nr_slab_pages1 = zone_page_state(zone,
2750 NR_SLAB_RECLAIMABLE);
2751 if (nr_slab_pages1 + nr_pages <= nr_slab_pages0)
2752 break;
2753 }
2754
2755 /*
2756 * Update nr_reclaimed by the number of slab pages we
2757 * reclaimed from this zone.
2758 */
2759 nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
2760 if (nr_slab_pages1 < nr_slab_pages0)
2761 sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1;
2762 }
2763
2764 p->reclaim_state = NULL;
2765 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
2766 lockdep_clear_current_reclaim_state();
2767 return sc.nr_reclaimed >= nr_pages;
2768 }
2769
2770 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
2771 {
2772 int node_id;
2773 int ret;
2774
2775 /*
2776 * Zone reclaim reclaims unmapped file backed pages and
2777 * slab pages if we are over the defined limits.
2778 *
2779 * A small portion of unmapped file backed pages is needed for
2780 * file I/O otherwise pages read by file I/O will be immediately
2781 * thrown out if the zone is overallocated. So we do not reclaim
2782 * if less than a specified percentage of the zone is used by
2783 * unmapped file backed pages.
2784 */
2785 if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
2786 zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
2787 return ZONE_RECLAIM_FULL;
2788
2789 if (zone->all_unreclaimable)
2790 return ZONE_RECLAIM_FULL;
2791
2792 /*
2793 * Do not scan if the allocation should not be delayed.
2794 */
2795 if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
2796 return ZONE_RECLAIM_NOSCAN;
2797
2798 /*
2799 * Only run zone reclaim on the local zone or on zones that do not
2800 * have associated processors. This will favor the local processor
2801 * over remote processors and spread off node memory allocations
2802 * as wide as possible.
2803 */
2804 node_id = zone_to_nid(zone);
2805 if (node_state(node_id, N_CPU) && node_id != numa_node_id())
2806 return ZONE_RECLAIM_NOSCAN;
2807
2808 if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
2809 return ZONE_RECLAIM_NOSCAN;
2810
2811 ret = __zone_reclaim(zone, gfp_mask, order);
2812 zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
2813
2814 if (!ret)
2815 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
2816
2817 return ret;
2818 }
2819 #endif
2820
2821 /*
2822 * page_evictable - test whether a page is evictable
2823 * @page: the page to test
2824 * @vma: the VMA in which the page is or will be mapped, may be NULL
2825 *
2826 * Test whether page is evictable--i.e., should be placed on active/inactive
2827 * lists vs unevictable list. The vma argument is !NULL when called from the
2828 * fault path to determine how to instantiate a new page.
2829 *
2830 * Reasons page might not be evictable:
2831 * (1) page's mapping marked unevictable
2832 * (2) page is part of an mlocked VMA
2833 *
2834 */
2835 int page_evictable(struct page *page, struct vm_area_struct *vma)
2836 {
2837
2838 if (mapping_unevictable(page_mapping(page)))
2839 return 0;
2840
2841 if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
2842 return 0;
2843
2844 return 1;
2845 }
2846
2847 /**
2848 * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
2849 * @page: page to check evictability and move to appropriate lru list
2850 * @zone: zone page is in
2851 *
2852 * Checks a page for evictability and moves the page to the appropriate
2853 * zone lru list.
2854 *
2855 * Restrictions: zone->lru_lock must be held, page must be on LRU and must
2856 * have PageUnevictable set.
2857 */
2858 static void check_move_unevictable_page(struct page *page, struct zone *zone)
2859 {
2860 VM_BUG_ON(PageActive(page));
2861
2862 retry:
2863 ClearPageUnevictable(page);
2864 if (page_evictable(page, NULL)) {
2865 enum lru_list l = page_lru_base_type(page);
2866
2867 __dec_zone_state(zone, NR_UNEVICTABLE);
2868 list_move(&page->lru, &zone->lru[l].list);
2869 mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
2870 __inc_zone_state(zone, NR_INACTIVE_ANON + l);
2871 __count_vm_event(UNEVICTABLE_PGRESCUED);
2872 } else {
2873 /*
2874 * rotate unevictable list
2875 */
2876 SetPageUnevictable(page);
2877 list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
2878 mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
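/*
 * The page may have become evictable again while we were moving it;
 * recheck so that it is not left stranded on the unevictable list.
 */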
2879 if (page_evictable(page, NULL))
2880 goto retry;
2881 }
2882 }
2883
2884 /**
2885 * scan_mapping_unevictable_pages - scan an address space for evictable pages
2886 * @mapping: struct address_space to scan for evictable pages
2887 *
2888 * Scan all pages in mapping. Check unevictable pages for
2889 * evictability and move them to the appropriate zone lru list.
2890 */
2891 void scan_mapping_unevictable_pages(struct address_space *mapping)
2892 {
2893 pgoff_t next = 0;
2894 pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
2895 PAGE_CACHE_SHIFT;
2896 struct zone *zone;
2897 struct pagevec pvec;
2898
2899 if (mapping->nrpages == 0)
2900 return;
2901
2902 pagevec_init(&pvec, 0);
2903 while (next < end &&
2904 pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
2905 int i;
2906 int pg_scanned = 0;
2907
2908 zone = NULL;
2909
2910 for (i = 0; i < pagevec_count(&pvec); i++) {
2911 struct page *page = pvec.pages[i];
2912 pgoff_t page_index = page->index;
2913 struct zone *pagezone = page_zone(page);
2914
2915 pg_scanned++;
2916 if (page_index > next)
2917 next = page_index;
2918 next++;
2919
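/* Only switch (and re-take) the lru_lock when the zone changes */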
2920 if (pagezone != zone) {
2921 if (zone)
2922 spin_unlock_irq(&zone->lru_lock);
2923 zone = pagezone;
2924 spin_lock_irq(&zone->lru_lock);
2925 }
2926
2927 if (PageLRU(page) && PageUnevictable(page))
2928 check_move_unevictable_page(page, zone);
2929 }
2930 if (zone)
2931 spin_unlock_irq(&zone->lru_lock);
2932 pagevec_release(&pvec);
2933
2934 count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
2935 }
2936
2937 }
2938
2939 /**
2940 * scan_zone_unevictable_pages - check unevictable list for evictable pages
2941 * @zone: the zone whose unevictable list to scan
2942 *
2943 * Scan @zone's unevictable LRU lists to check for pages that have become
2944 * evictable. Move those that have to @zone's inactive list where they
2945 * become candidates for reclaim, unless shrink_inactive_zone() decides
2946 * to reactivate them. Pages that are still unevictable are rotated
2947 * back onto @zone's unevictable list.
2948 */
2949 #define SCAN_UNEVICTABLE_BATCH_SIZE 16UL /* arbitrary lock hold batch size */
2950 static void scan_zone_unevictable_pages(struct zone *zone)
2951 {
2952 struct list_head *l_unevictable = &zone->lru[LRU_UNEVICTABLE].list;
2953 unsigned long scan;
2954 unsigned long nr_to_scan = zone_page_state(zone, NR_UNEVICTABLE);
2955
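/*
 * Work through the unevictable list in small batches so zone->lru_lock
 * is not held, and irqs are not disabled, for too long at a time.
 */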
2956 while (nr_to_scan > 0) {
2957 unsigned long batch_size = min(nr_to_scan,
2958 SCAN_UNEVICTABLE_BATCH_SIZE);
2959
2960 spin_lock_irq(&zone->lru_lock);
2961 for (scan = 0; scan < batch_size; scan++) {
2962 struct page *page = lru_to_page(l_unevictable);
2963
2964 if (!trylock_page(page))
2965 continue;
2966
2967 prefetchw_prev_lru_page(page, l_unevictable, flags);
2968
2969 if (likely(PageLRU(page) && PageUnevictable(page)))
2970 check_move_unevictable_page(page, zone);
2971
2972 unlock_page(page);
2973 }
2974 spin_unlock_irq(&zone->lru_lock);
2975
2976 nr_to_scan -= batch_size;
2977 }
2978 }
2979
2980
2981 /**
2982 * scan_all_zones_unevictable_pages - scan all unevictable lists for evictable pages
2983 *
2984 * A really big hammer: scan all zones' unevictable LRU lists to check for
2985 * pages that have become evictable. Move those back to the zones'
2986 * inactive list where they become candidates for reclaim.
2987 * This occurs when, e.g., we have unswappable pages on the unevictable lists,
2988 * and we add swap to the system. As such, it runs in the context of a task
2989 * that has possibly/probably made some previously unevictable pages
2990 * evictable.
2991 */
2992 static void scan_all_zones_unevictable_pages(void)
2993 {
2994 struct zone *zone;
2995
2996 for_each_zone(zone) {
2997 scan_zone_unevictable_pages(zone);
2998 }
2999 }
3000
3001 /*
3002 * scan_unevictable_pages [vm] sysctl handler. On demand re-scan of
3003 * all nodes' unevictable lists for evictable pages
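 * (a non-zero write to /proc/sys/vm/scan_unevictable_pages triggers the scan)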
3004 */
3005 unsigned long scan_unevictable_pages;
3006
3007 int scan_unevictable_handler(struct ctl_table *table, int write,
3008 void __user *buffer,
3009 size_t *length, loff_t *ppos)
3010 {
3011 proc_doulongvec_minmax(table, write, buffer, length, ppos);
3012
3013 if (write && *(unsigned long *)table->data)
3014 scan_all_zones_unevictable_pages();
3015
3016 scan_unevictable_pages = 0;
3017 return 0;
3018 }
3019
3020 #ifdef CONFIG_NUMA
3021 /*
3022 * per node 'scan_unevictable_pages' attribute. On demand re-scan of
3023 * a specified node's per zone unevictable lists for evictable pages.
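 * (exposed as /sys/devices/system/node/nodeN/scan_unevictable_pages)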
3024 */
3025
3026 static ssize_t read_scan_unevictable_node(struct sys_device *dev,
3027 struct sysdev_attribute *attr,
3028 char *buf)
3029 {
3030 return sprintf(buf, "0\n"); /* always zero; should fit... */
3031 }
3032
3033 static ssize_t write_scan_unevictable_node(struct sys_device *dev,
3034 struct sysdev_attribute *attr,
3035 const char *buf, size_t count)
3036 {
3037 struct zone *node_zones = NODE_DATA(dev->id)->node_zones;
3038 struct zone *zone;
3039 unsigned long res;
3040 unsigned long req = strict_strtoul(buf, 10, &res);
3041
3042 if (req || !res)
3043 return 1; /* parse error or zero request is a no-op */
3044
3045 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
3046 if (!populated_zone(zone))
3047 continue;
3048 scan_zone_unevictable_pages(zone);
3049 }
3050 return 1;
3051 }
3052
3053
3054 static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
3055 read_scan_unevictable_node,
3056 write_scan_unevictable_node);
3057
3058 int scan_unevictable_register_node(struct node *node)
3059 {
3060 return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages);
3061 }
3062
3063 void scan_unevictable_unregister_node(struct node *node)
3064 {
3065 sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
3066 }
3067 #endif