/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/memremap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
#endif

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);
		struct lruvec *lruvec;
		unsigned long flags;

		spin_lock_irqsave(&zone->lru_lock, flags);
		lruvec = mem_cgroup_page_lruvec(page, zone);
		VM_BUG_ON_PAGE(!PageLRU(page), page);
		__ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_off_lru(page));
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
	mem_cgroup_uncharge(page);
}

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	free_hot_cold_page(page, false);
}

static void __put_compound_page(struct page *page)
{
	compound_page_dtor *dtor;

	/*
	 * __page_cache_release() is supposed to be called for thp, not for
	 * hugetlb. This is because a hugetlb page never has PageLRU set
	 * (it is never put on any LRU list) and no memcg routines should
	 * be called for hugetlb (it has a separate hugetlb_cgroup).
	 */
	if (!PageHuge(page))
		__page_cache_release(page);
	dtor = get_compound_page_dtor(page);
	(*dtor)(page);
}

void __put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		__put_compound_page(page);
	else
		__put_single_page(page);
}
EXPORT_SYMBOL(__put_page);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		put_page(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);

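/*
 * Illustrative usage sketch (not part of the original file): how a caller
 * such as read_cache_pages() hands a string of pages back. The alloc_page()
 * page source below is an assumption for demonstration purposes only.
 */
#if 0	/* example only, not compiled */
static void put_pages_list_example(void)
{
	LIST_HEAD(pages);
	int i;

	for (i = 0; i < 4; i++) {
		struct page *page = alloc_page(GFP_KERNEL);

		if (!page)
			break;
		/* pages are threaded on the list via page->lru */
		list_add(&page->lru, &pages);
	}

	/* drops one reference per page and empties the list */
	put_pages_list(&pages);
}
#endif
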
/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:	An array of struct kvec structures
 * @nr_segs:	number of segments to pin
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_segs long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_segs is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
		struct page **pages)
{
	int seg;

	for (seg = 0; seg < nr_segs; seg++) {
		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
			return seg;

		pages[seg] = kmap_to_page(kiov[seg].iov_base);
		get_page(pages[seg]);
	}

	return seg;
}
EXPORT_SYMBOL_GPL(get_kernel_pages);

/*
 * get_kernel_page() - pin a kernel page in memory
 * @start:	starting kernel address
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointer to the page pinned.
 *		Must have space for at least one struct page pointer.
 *
 * Returns 1 if page is pinned. If the page was not pinned, returns
 * -errno. The page returned must be released with a put_page() call
 * when it is finished with.
 */
int get_kernel_page(unsigned long start, int write, struct page **pages)
{
	const struct kvec kiov = {
		.iov_base = (void *)start,
		.iov_len = PAGE_SIZE
	};

	return get_kernel_pages(&kiov, 1, write, pages);
}
EXPORT_SYMBOL_GPL(get_kernel_page);

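/*
 * Illustrative usage sketch (not part of the original file): pinning the
 * page that backs a page-aligned kernel buffer. The buffer allocation is
 * an assumption for demonstration; real callers pass addresses of
 * page-sized kernel mappings.
 */
#if 0	/* example only, not compiled */
static void get_kernel_page_example(void)
{
	unsigned long addr = __get_free_page(GFP_KERNEL);
	struct page *page;

	if (!addr)
		return;

	/* pin the single page backing this kernel address */
	if (get_kernel_page(addr, 0, &page) == 1) {
		/* ... use page ... */
		put_page(page);		/* drop the pin taken above */
	}

	free_page(addr);
}
#endif
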
static void pagevec_lru_move_fn(struct pagevec *pvec,
	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
	void *arg)
{
	int i;
	struct zone *zone = NULL;
	struct lruvec *lruvec;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irqrestore(&zone->lru_lock, flags);
			zone = pagezone;
			spin_lock_irqsave(&zone->lru_lock, flags);
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);
		(*move_fn)(page, lruvec, arg);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

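/*
 * Illustrative sketch (not part of the original file): the shape of a
 * move_fn callback for pagevec_lru_move_fn(). It runs under
 * zone->lru_lock with interrupts disabled; example_move_fn and the
 * counter argument are assumed names.
 */
#if 0	/* example only, not compiled */
static void example_move_fn(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	unsigned long *moved = arg;

	/* only touch pages still on the LRU; others are in transit */
	if (PageLRU(page) && !PageUnevictable(page)) {
		enum lru_list lru = page_lru_base_type(page);

		list_move(&page->lru, &lruvec->lists[lru]);
		(*moved)++;
	}
}
#endif
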
static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int *pgmoved = arg;

	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		enum lru_list lru = page_lru_base_type(page);
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		(*pgmoved)++;
	}
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int pgmoved = 0;

	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
	__count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for
 * immediate reclaim.  If it still appears to be reclaimable, move it to
 * the tail of the inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		get_page(page);
		local_irq_save(flags);
		pvec = this_cpu_ptr(&lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

static void update_page_reclaim_stat(struct lruvec *lruvec,
				     int file, int rotated)
{
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;

	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;
}

static void __activate_page(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);

		del_page_from_lru_list(page, lruvec, lru);
		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(page, lruvec, lru);
		trace_mm_lru_activate(page);

		__count_vm_event(PGACTIVATE);
		update_page_reclaim_stat(lruvec, file, 1);
	}
}

#ifdef CONFIG_SMP
static void activate_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);

	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

static bool need_activate_page_drain(int cpu)
{
	return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
}

void activate_page(struct page *page)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

		get_page(page);
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
		put_cpu_var(activate_page_pvecs);
	}
}

#else
static inline void activate_page_drain(int cpu)
{
}

static bool need_activate_page_drain(int cpu)
{
	return false;
}

void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	__activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
	spin_unlock_irq(&zone->lru_lock);
}
#endif

static void __lru_cache_activate_page(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
	int i;

	/*
	 * Search backwards on the optimistic assumption that the page being
	 * activated has just been added to this pagevec. Note that only
	 * the local pagevec is examined as a !PageLRU page could be in the
	 * process of being released, reclaimed, migrated or on a remote
	 * pagevec that is currently being drained. Furthermore, marking
	 * a remote pagevec's page PageActive potentially hits a race where
	 * a page is marked PageActive just after it is added to the inactive
	 * list, causing accounting errors and BUG_ON checks to trigger.
	 */
	for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
		struct page *pagevec_page = pvec->pages[i];

		if (pagevec_page == page) {
			SetPageActive(page);
			break;
		}
	}

	put_cpu_var(lru_add_pvec);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
 */
void mark_page_accessed(struct page *page)
{
	page = compound_head(page);
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page)) {

		/*
		 * If the page is on the LRU, queue it for activation via
		 * activate_page_pvecs. Otherwise, assume the page is on a
		 * pagevec, mark it active and it'll be moved to the active
		 * LRU on the next drain.
		 */
		if (PageLRU(page))
			activate_page(page);
		else
			__lru_cache_activate_page(page);
		ClearPageReferenced(page);
		if (page_is_file_cache(page))
			workingset_activation(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
	if (page_is_idle(page))
		clear_page_idle(page);
}
EXPORT_SYMBOL(mark_page_accessed);

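/*
 * Illustrative sketch (not part of the original file): the two-touch
 * promotion described by the table above, for a page already on the
 * inactive LRU with neither flag set.
 */
#if 0	/* example only, not compiled */
static void mark_page_accessed_example(struct page *page)
{
	mark_page_accessed(page); /* inactive,unreferenced -> inactive,referenced */
	mark_page_accessed(page); /* inactive,referenced -> active,unreferenced */
}
#endif
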
static void __lru_cache_add(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);

	get_page(page);
	if (!pagevec_space(pvec))
		__pagevec_lru_add(pvec);
	pagevec_add(pvec, page);
	put_cpu_var(lru_add_pvec);
}

/**
 * lru_cache_add_anon - add a page to the page lists
 * @page: the page to add
 */
void lru_cache_add_anon(struct page *page)
{
	if (PageActive(page))
		ClearPageActive(page);
	__lru_cache_add(page);
}

void lru_cache_add_file(struct page *page)
{
	if (PageActive(page))
		ClearPageActive(page);
	__lru_cache_add(page);
}
EXPORT_SYMBOL(lru_cache_add_file);

/**
 * lru_cache_add - add a page to a page list
 * @page: the page to be added to the LRU.
 *
 * Queue the page for addition to the LRU via pagevec. The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * pagevec is drained. This gives a chance for the caller of lru_cache_add()
 * to have the page added to the active list using mark_page_accessed().
 */
void lru_cache_add(struct page *page)
{
	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
	VM_BUG_ON_PAGE(PageLRU(page), page);
	__lru_cache_add(page);
}

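/*
 * Illustrative sketch (not part of the original file): the interplay the
 * comment above describes, for a newly allocated page that is not yet
 * visible to other tasks.
 */
#if 0	/* example only, not compiled */
static void lru_cache_add_example(struct page *page)
{
	/* newly allocated and invisible, so the non-atomic op is safe */
	__SetPageReferenced(page);

	/* queue the page on this CPU's lru_add_pvec */
	lru_cache_add(page);

	/*
	 * A second access before the pagevec drains activates the page:
	 * mark_page_accessed() finds it in the local pagevec (see
	 * __lru_cache_activate_page()) and sets PageActive, so the
	 * eventual __pagevec_lru_add() files it on the active list.
	 */
	mark_page_accessed(page);
}
#endif
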
/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page:  the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, through eg. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks.  This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
	struct zone *zone = page_zone(page);
	struct lruvec *lruvec;

	spin_lock_irq(&zone->lru_lock);
	lruvec = mem_cgroup_page_lruvec(page, zone);
	ClearPageActive(page);
	SetPageUnevictable(page);
	SetPageLRU(page);
	add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
	spin_unlock_irq(&zone->lru_lock);
}

/**
 * lru_cache_add_active_or_unevictable
 * @page:  the page to be added to LRU
 * @vma:   vma in which page is mapped for determining reclaimability
 *
 * Place @page on the active or unevictable LRU list, depending on its
 * evictability.  Note that if the page is not evictable, it goes
 * directly back onto its zone's unevictable list; it does NOT use a
 * per-cpu pagevec.
 */
void lru_cache_add_active_or_unevictable(struct page *page,
					 struct vm_area_struct *vma)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
		SetPageActive(page);
		lru_cache_add(page);
		return;
	}

	if (!TestSetPageMlocked(page)) {
		/*
		 * We use the irq-unsafe __mod_zone_page_stat because this
		 * counter is not modified from interrupt context, and the pte
		 * lock is held (spinlock), which implies preemption disabled.
		 */
		__mod_zone_page_state(page_zone(page), NR_MLOCK,
				    hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	add_page_to_unevictable_list(page);
}

/*
 * If the page cannot be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't page_mapped and dirty/writeback, the page
 * can be reclaimed ASAP using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4, the page is moved to the head of the inactive list
 * because the VM expects it to be written out by flusher threads,
 * which is much more effective than the single-page writeout
 * from reclaim.
 */
static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
				   void *arg)
{
	int lru, file;
	bool active;

	if (!PageLRU(page))
		return;

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	active = PageActive(page);
	file = page_is_file_cache(page);
	lru = page_lru_base_type(page);

	del_page_from_lru_list(page, lruvec, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);
	add_page_to_lru_list(page, lruvec, lru);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * PG_reclaim could be raced with end_page_writeback.
		 * It can make readahead confusing, but the race window
		 * is _really_ small and it's a non-critical problem.
		 */
		SetPageReclaim(page);
	} else {
		/*
		 * The page's writeback ended while it was in the pagevec,
		 * so move the page to the tail of the inactive list.
		 */
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		__count_vm_event(PGROTATED);
	}

	if (active)
		__count_vm_event(PGDEACTIVATE);
	update_page_reclaim_stat(lruvec, file, 0);
}

static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
			      void *arg)
{
	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);

		del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
		ClearPageActive(page);
		ClearPageReferenced(page);
		add_page_to_lru_list(page, lruvec, lru);

		__count_vm_event(PGDEACTIVATE);
		update_page_reclaim_stat(lruvec, file, 0);
	}
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);

	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}

	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);

	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);

	activate_page_drain(cpu);
}

/**
 * deactivate_file_page - forcefully deactivate a file page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_file_page(struct page *page)
{
	/*
	 * In a workload with many unevictable pages such as mprotect,
	 * deactivating an unevictable page to accelerate reclaim is
	 * pointless.
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);

		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
		put_cpu_var(lru_deactivate_file_pvecs);
	}
}

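/*
 * Illustrative sketch (not part of the original file): the caller pattern
 * from invalidate_mapping_pages(), which hints reclaim about pages whose
 * invalidation failed. The surrounding loop over the mapping is omitted.
 */
#if 0	/* example only, not compiled */
static void deactivate_file_page_example(struct page *page)
{
	/*
	 * invalidate_inode_page() returns 0 when it cannot take the
	 * page out of the page cache (e.g. dirty or under writeback);
	 * deactivating it makes it a cheap reclaim target instead.
	 */
	if (!invalidate_inode_page(page))
		deactivate_file_page(page);
}
#endif
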
/**
 * deactivate_page - deactivate a page
 * @page: page to deactivate
 *
 * deactivate_page() moves @page to the inactive list if @page was on the
 * active list and was not unevictable. This is done to accelerate the
 * reclaim of @page.
 */
void deactivate_page(struct page *page)
{
	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);

		get_page(page);
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
		put_cpu_var(lru_deactivate_pvecs);
	}
}

void lru_add_drain(void)
{
	lru_add_drain_cpu(get_cpu());
	put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

void lru_add_drain_all(void)
{
	static DEFINE_MUTEX(lock);
	static struct cpumask has_work;
	int cpu;

	mutex_lock(&lock);
	get_online_cpus();
	cpumask_clear(&has_work);

	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
		    need_activate_page_drain(cpu)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			schedule_work_on(cpu, work);
			cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

	put_online_cpus();
	mutex_unlock(&lock);
}

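/*
 * Illustrative sketch (not part of the original file): code that must
 * observe every page on the real LRU lists drains the pagevecs first.
 * The caller shown here is an assumed example.
 */
#if 0	/* example only, not compiled */
static void drain_before_lru_scan_example(void)
{
	/*
	 * lru_add_drain() flushes only the current CPU's pagevecs;
	 * lru_add_drain_all() schedules and waits for a drain on every
	 * CPU with pending work, so pages queued remotely become
	 * visible on the LRU lists as well.
	 */
	lru_add_drain_all();

	/* ... walk the LRU lists under zone->lru_lock ... */
}
#endif
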
/**
 * release_pages - batched put_page()
 * @pages: array of pages to release
 * @nr: number of pages
 * @cold: whether the pages are cache cold
 *
 * Decrement the reference count on all the pages in @pages.  If it
 * fell to zero, remove the page from the LRU and free it.
 */
void release_pages(struct page **pages, int nr, bool cold)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct zone *zone = NULL;
	struct lruvec *lruvec;
	unsigned long uninitialized_var(flags);
	unsigned int uninitialized_var(lock_batch);

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		/*
		 * Make sure the IRQ-safe lock-holding time does not get
		 * excessive with a continuous string of pages from the
		 * same zone. The lock is held only if zone != NULL.
		 */
		if (zone && ++lock_batch == SWAP_CLUSTER_MAX) {
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			zone = NULL;
		}

		if (is_huge_zero_page(page)) {
			put_huge_zero_page();
			continue;
		}

		page = compound_head(page);
		if (!put_page_testzero(page))
			continue;

		if (PageCompound(page)) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			__put_compound_page(page);
			continue;
		}

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
									flags);
				lock_batch = 0;
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}

			lruvec = mem_cgroup_page_lruvec(page, zone);
			VM_BUG_ON_PAGE(!PageLRU(page), page);
			__ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, page_off_lru(page));
		}

		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);

		list_add(&page->lru, &pages_to_free);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	mem_cgroup_uncharge_list(&pages_to_free);
	free_hot_cold_page_list(&pages_to_free, cold);
}
EXPORT_SYMBOL(release_pages);

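/*
 * Illustrative sketch (not part of the original file): batched release
 * of an array of previously pinned pages; the array and count come from
 * an assumed caller such as a gang lookup.
 */
#if 0	/* example only, not compiled */
static void release_pages_example(struct page **pages, int nr)
{
	/*
	 * One call takes zone->lru_lock at most once per run of pages
	 * from the same zone (and re-takes it every SWAP_CLUSTER_MAX
	 * pages), which is much cheaper than nr put_page() calls.
	 */
	release_pages(pages, nr, false);	/* pages are cache-warm */
}
#endif
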
/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct page *page, struct page *page_tail,
		       struct lruvec *lruvec, struct list_head *list)
{
	const int file = 0;

	VM_BUG_ON_PAGE(!PageHead(page), page);
	VM_BUG_ON_PAGE(PageCompound(page_tail), page);
	VM_BUG_ON_PAGE(PageLRU(page_tail), page);
	VM_BUG_ON(NR_CPUS != 1 &&
		  !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));

	if (!list)
		SetPageLRU(page_tail);

	if (likely(PageLRU(page)))
		list_add_tail(&page_tail->lru, &page->lru);
	else if (list) {
		/* page reclaim is reclaiming a huge page */
		get_page(page_tail);
		list_add_tail(&page_tail->lru, list);
	} else {
		struct list_head *list_head;
		/*
		 * Head page has not yet been counted, as an hpage,
		 * so we must account for each subpage individually.
		 *
		 * Use the standard add function to put page_tail on the list,
		 * but then correct its position so they all end up in order.
		 */
		add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
		list_head = page_tail->lru.prev;
		list_move_tail(&page_tail->lru, list_head);
	}

	if (!PageUnevictable(page))
		update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int file = page_is_file_cache(page);
	int active = PageActive(page);
	enum lru_list lru = page_lru(page);

	VM_BUG_ON_PAGE(PageLRU(page), page);

	SetPageLRU(page);
	add_page_to_lru_list(page, lruvec, lru);
	update_page_reclaim_stat(lruvec, file, active);
	trace_mm_lru_insertion(page, lru);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
}
EXPORT_SYMBOL(__pagevec_lru_add);

/**
 * pagevec_lookup_entries - gang pagecache lookup
 * @pvec:	Where the resulting entries are placed
 * @mapping:	The address_space to search
 * @start:	The starting entry index
 * @nr_pages:	The maximum number of entries
 * @indices:	The cache indices corresponding to the entries in @pvec
 *
 * pagevec_lookup_entries() will search for and return a group of up
 * to @nr_pages pages and shadow entries in the mapping.  All
 * entries are placed in @pvec.  pagevec_lookup_entries() takes a
 * reference against actual pages in @pvec.
 *
 * The search returns a group of mapping-contiguous entries with
 * ascending indexes.  There may be holes in the indices due to
 * not-present entries.
 *
 * pagevec_lookup_entries() returns the number of entries which were
 * found.
 */
unsigned pagevec_lookup_entries(struct pagevec *pvec,
				struct address_space *mapping,
				pgoff_t start, unsigned nr_pages,
				pgoff_t *indices)
{
	pvec->nr = find_get_entries(mapping, start, nr_pages,
				    pvec->pages, indices);
	return pagevec_count(pvec);
}

/**
 * pagevec_remove_exceptionals - pagevec exceptionals pruning
 * @pvec:	The pagevec to prune
 *
 * pagevec_lookup_entries() fills both pages and exceptional radix
 * tree entries into the pagevec.  This function prunes all
 * exceptionals from @pvec without leaving holes, so that it can be
 * passed on to page-only pagevec operations.
 */
void pagevec_remove_exceptionals(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!radix_tree_exceptional_entry(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}

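/*
 * Illustrative sketch (not part of the original file): the usual pairing
 * of pagevec_lookup_entries() with pagevec_remove_exceptionals(), as in
 * the truncate/invalidate loops. @mapping is an assumed caller argument.
 */
#if 0	/* example only, not compiled */
static void lookup_entries_example(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index = 0;
	unsigned nr;

	pagevec_init(&pvec, 0);
	while ((nr = pagevec_lookup_entries(&pvec, mapping, index,
					    PAGEVEC_SIZE, indices))) {
		/* advance past the last index found, present or not */
		index = indices[nr - 1] + 1;

		/* drop shadow entries, keeping only real pages */
		pagevec_remove_exceptionals(&pvec);

		/* ... operate on pvec.pages[0..pagevec_count()-1] ... */

		pagevec_release(&pvec);	/* drops the page references */
	}
}
#endif
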
/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
			pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);

unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
			    pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
				      nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);

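/*
 * Illustrative sketch (not part of the original file): walking the dirty
 * pages of a mapping the way write_cache_pages() does, using the
 * PAGECACHE_TAG_DIRTY radix-tree tag. @mapping is an assumed argument.
 */
#if 0	/* example only, not compiled */
static void lookup_tag_example(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;
	unsigned i, nr;

	pagevec_init(&pvec, 0);
	while ((nr = pagevec_lookup_tag(&pvec, mapping, &index,
					PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE))) {
		/* @index has been advanced past the last page found */
		for (i = 0; i < nr; i++) {
			struct page *page = pvec.pages[i];

			/* ... lock and write back page ... */
		}
		pagevec_release(&pvec);
	}
}
#endif
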
/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
#ifdef CONFIG_SWAP
	int i;

	for (i = 0; i < MAX_SWAPFILES; i++)
		spin_lock_init(&swapper_spaces[i].tree_lock);
#endif

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
}