/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting.
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
        struct list_head freepages;     /* List of free pages to migrate to */
        struct list_head migratepages;  /* List of pages being migrated */
        unsigned long nr_freepages;     /* Number of isolated free pages */
        unsigned long nr_migratepages;  /* Number of pages to migrate */
        unsigned long free_pfn;         /* isolate_freepages search base */
        unsigned long migrate_pfn;      /* isolate_migratepages search base */
        bool sync;                      /* Synchronous migration */

        /* Account for isolated anon and file pages */
        unsigned long nr_anon;
        unsigned long nr_file;

        unsigned int order;             /* order a direct compactor needs */
        int migratetype;                /* MOVABLE, RECLAIMABLE etc */
        struct zone *zone;
};

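/* Put the isolated free pages back to the buddy allocator, returning the count */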
static unsigned long release_freepages(struct list_head *freelist)
{
        struct page *page, *next;
        unsigned long count = 0;

        list_for_each_entry_safe(page, next, freelist, lru) {
                list_del(&page->lru);
                __free_page(page);
                count++;
        }

        return count;
}

/* Isolate free pages onto a private freelist. Must hold zone->lock */
static unsigned long isolate_freepages_block(struct zone *zone,
                                unsigned long blockpfn,
                                struct list_head *freelist)
{
        unsigned long zone_end_pfn, end_pfn;
        int nr_scanned = 0, total_isolated = 0;
        struct page *cursor;

        /* Get the last PFN we should scan for free pages at */
        zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
        end_pfn = min(blockpfn + pageblock_nr_pages, zone_end_pfn);

        /* Find the first usable PFN in the block to initialise the page cursor */
        for (; blockpfn < end_pfn; blockpfn++) {
                if (pfn_valid_within(blockpfn))
                        break;
        }
        cursor = pfn_to_page(blockpfn);

        /* Isolate free pages. This assumes the block is valid */
        for (; blockpfn < end_pfn; blockpfn++, cursor++) {
                int isolated, i;
                struct page *page = cursor;

                if (!pfn_valid_within(blockpfn))
                        continue;
                nr_scanned++;

                if (!PageBuddy(page))
                        continue;

                /* Found a free page, break it into order-0 pages */
                isolated = split_free_page(page);
                total_isolated += isolated;
                for (i = 0; i < isolated; i++) {
                        list_add(&page->lru, freelist);
                        page++;
                }

                /* If a page was split, advance to the end of it */
                if (isolated) {
                        blockpfn += isolated - 1;
                        cursor += isolated - 1;
                }
        }

        trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
        return total_isolated;
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
        int migratetype = get_pageblock_migratetype(page);

        /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
        if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
                return false;

        /* If the page is a large free page, then allow migration */
        if (PageBuddy(page) && page_order(page) >= pageblock_order)
                return true;

        /* If the block is MIGRATE_MOVABLE, allow migration */
        if (migratetype == MIGRATE_MOVABLE)
                return true;

        /* Otherwise skip the block */
        return false;
}

/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
                                struct compact_control *cc)
{
        struct page *page;
        unsigned long high_pfn, low_pfn, pfn;
        unsigned long flags;
        int nr_freepages = cc->nr_freepages;
        struct list_head *freelist = &cc->freepages;

        pfn = cc->free_pfn;
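        /*
         * low_pfn bounds the search from below: keeping it a full pageblock
         * above the migrate scanner means the two scanners never work
         * within the same pageblock.
         */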
        low_pfn = cc->migrate_pfn + pageblock_nr_pages;
        high_pfn = low_pfn;

        /*
         * Isolate free pages until enough are available to migrate the
         * pages on cc->migratepages. We stop searching if the migrate
         * and free page scanners meet or enough free pages are isolated.
         */
        for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
                                        pfn -= pageblock_nr_pages) {
                unsigned long isolated;

                if (!pfn_valid(pfn))
                        continue;

                /*
                 * Check for overlapping nodes/zones. It's possible on some
                 * configurations to have a setup like
                 * node0 node1 node0
                 * i.e. it's possible that pages within a zone's range of
                 * PFNs do not all belong to that single zone.
                 */
                page = pfn_to_page(pfn);
                if (page_zone(page) != zone)
                        continue;

                /* Check the block is suitable for migration */
                if (!suitable_migration_target(page))
                        continue;

                /*
                 * Found a block suitable for isolating free pages from. Take
                 * the zone lock with IRQs disabled, double check that the
                 * block is still suitable, and isolate the pages; rechecking
                 * under the lock minimises the time IRQs are disabled.
                 */
                isolated = 0;
                spin_lock_irqsave(&zone->lock, flags);
                if (suitable_migration_target(page)) {
                        isolated = isolate_freepages_block(zone, pfn, freelist);
                        nr_freepages += isolated;
                }
                spin_unlock_irqrestore(&zone->lock, flags);

                /*
                 * Record the highest PFN we isolated pages from. When next
                 * looking for free pages, the search will restart here as
                 * page migration may have returned some pages to the allocator
                 */
                if (isolated)
                        high_pfn = max(high_pfn, pfn);
        }

        /* split_free_page does not map the pages */
        list_for_each_entry(page, freelist, lru) {
                arch_alloc_page(page, 0);
                kernel_map_pages(page, 1, 1);
        }

        cc->free_pfn = high_pfn;
        cc->nr_freepages = nr_freepages;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
        struct page *page;
        unsigned int count[NR_LRU_LISTS] = { 0, };

        list_for_each_entry(page, &cc->migratepages, lru) {
                int lru = page_lru_base_type(page);
                count[lru]++;
        }

        cc->nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
        cc->nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
        __mod_zone_page_state(zone, NR_ISOLATED_ANON, cc->nr_anon);
        __mod_zone_page_state(zone, NR_ISOLATED_FILE, cc->nr_file);
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
        unsigned long active, inactive, isolated;

        inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
                                        zone_page_state(zone, NR_INACTIVE_ANON);
        active = zone_page_state(zone, NR_ACTIVE_FILE) +
                                        zone_page_state(zone, NR_ACTIVE_ANON);
        isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
                                        zone_page_state(zone, NR_ISOLATED_ANON);

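        /* "Too many" means isolated pages exceed half of the remaining LRU pages */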
        return isolated > (inactive + active) / 2;
}

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static unsigned long isolate_migratepages(struct zone *zone,
                                        struct compact_control *cc)
{
        unsigned long low_pfn, end_pfn;
        unsigned long last_pageblock_nr = 0, pageblock_nr;
        unsigned long nr_scanned = 0, nr_isolated = 0;
        struct list_head *migratelist = &cc->migratepages;

        /* Do not scan outside zone boundaries */
        low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

        /* Only scan within a pageblock boundary */
        end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);

        /* Do not cross the free scanner or scan within a memory hole */
        if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
                cc->migrate_pfn = end_pfn;
                return 0;
        }

        /*
         * Ensure that there are not too many pages isolated from the LRU
         * list by either parallel reclaimers or compaction. If there are,
         * delay for some time until fewer pages are isolated
         */
        while (unlikely(too_many_isolated(zone))) {
                congestion_wait(BLK_RW_ASYNC, HZ/10);

                if (fatal_signal_pending(current))
                        return 0;
        }

        /* Time to isolate some pages for migration */
        cond_resched();
        spin_lock_irq(&zone->lru_lock);
        for (; low_pfn < end_pfn; low_pfn++) {
                struct page *page;
                bool locked = true;

                /* give a chance to irqs before checking need_resched() */
                if (!((low_pfn+1) % SWAP_CLUSTER_MAX)) {
                        spin_unlock_irq(&zone->lru_lock);
                        locked = false;
                }
                if (need_resched() || spin_is_contended(&zone->lru_lock)) {
                        if (locked)
                                spin_unlock_irq(&zone->lru_lock);
                        cond_resched();
                        spin_lock_irq(&zone->lru_lock);
                        if (fatal_signal_pending(current))
                                break;
                } else if (!locked)
                        spin_lock_irq(&zone->lru_lock);

                if (!pfn_valid_within(low_pfn))
                        continue;
                nr_scanned++;

                /* Get the page and skip if free */
                page = pfn_to_page(low_pfn);
                if (PageBuddy(page))
                        continue;

                /*
                 * For async migration, also only scan in MOVABLE blocks. Async
                 * migration is optimistic to see if the minimum amount of work
                 * satisfies the allocation
                 */
                pageblock_nr = low_pfn >> pageblock_order;
                if (!cc->sync && last_pageblock_nr != pageblock_nr &&
                                get_pageblock_migratetype(page) != MIGRATE_MOVABLE) {
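                        /*
                         * Advance past the remainder of the pageblock; the
                         * for loop's increment then lands low_pfn on an
                         * aligned pageblock boundary.
                         */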
                        low_pfn += pageblock_nr_pages;
                        low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
                        last_pageblock_nr = pageblock_nr;
                        continue;
                }

                if (!PageLRU(page))
                        continue;

                /*
                 * PageLRU is set, and lru_lock excludes isolation,
                 * splitting and collapsing (collapsing has already
                 * happened if PageLRU is set).
                 */
                if (PageTransHuge(page)) {
                        low_pfn += (1 << compound_order(page)) - 1;
                        continue;
                }

                /* Try to isolate the page */
                if (__isolate_lru_page(page, ISOLATE_BOTH, 0) != 0)
                        continue;

                VM_BUG_ON(PageTransCompound(page));

                /* Successfully isolated */
                del_page_from_lru_list(zone, page, page_lru(page));
                list_add(&page->lru, migratelist);
                cc->nr_migratepages++;
                nr_isolated++;

                /* Avoid isolating too much */
                if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
                        break;
        }

        acct_isolated(zone, cc);

        spin_unlock_irq(&zone->lru_lock);
        cc->migrate_pfn = low_pfn;

        trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

        return cc->nr_migratepages;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
                                        unsigned long data,
                                        int **result)
{
        struct compact_control *cc = (struct compact_control *)data;
        struct page *freepage;

        /* Isolate free pages if necessary */
        if (list_empty(&cc->freepages)) {
                isolate_freepages(cc->zone, cc);

                if (list_empty(&cc->freepages))
                        return NULL;
        }

        freepage = list_entry(cc->freepages.next, struct page, lru);
        list_del(&freepage->lru);
        cc->nr_freepages--;

        return freepage;
}

/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. When
 * migration is complete, we count the number of pages on the lists by hand.
 */
static void update_nr_listpages(struct compact_control *cc)
{
        int nr_migratepages = 0;
        int nr_freepages = 0;
        struct page *page;

        list_for_each_entry(page, &cc->migratepages, lru)
                nr_migratepages++;
        list_for_each_entry(page, &cc->freepages, lru)
                nr_freepages++;

        cc->nr_migratepages = nr_migratepages;
        cc->nr_freepages = nr_freepages;
}

static int compact_finished(struct zone *zone,
                            struct compact_control *cc)
{
        unsigned int order;
        unsigned long watermark;

        if (fatal_signal_pending(current))
                return COMPACT_PARTIAL;

        /* Compaction run completes if the migrate and free scanner meet */
        if (cc->free_pfn <= cc->migrate_pfn)
                return COMPACT_COMPLETE;

        /* Compaction run is not finished if the watermark is not met */
        watermark = low_wmark_pages(zone);
        watermark += (1 << cc->order);

        if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
                return COMPACT_CONTINUE;

        /*
         * order == -1 is expected when compacting via
         * /proc/sys/vm/compact_memory
         */
        if (cc->order == -1)
                return COMPACT_CONTINUE;

        /* Direct compactor: Is a suitable page free? */
        for (order = cc->order; order < MAX_ORDER; order++) {
                /* Job done if page is free of the right migratetype */
                if (!list_empty(&zone->free_area[order].free_list[cc->migratetype]))
                        return COMPACT_PARTIAL;

                /* Job done if allocation would set block type */
                if (order >= pageblock_order && zone->free_area[order].nr_free)
                        return COMPACT_PARTIAL;
        }

        return COMPACT_CONTINUE;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
        int fragindex;
        unsigned long watermark;

        /*
         * Watermarks for order-0 must be met for compaction. Note the 2UL.
         * This is because during migration, copies of pages need to be
         * allocated and for a short time, the footprint is higher
         */
        watermark = low_wmark_pages(zone) + (2UL << order);
        if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
                return COMPACT_SKIPPED;

        /*
         * order == -1 is expected when compacting via
         * /proc/sys/vm/compact_memory
         */
        if (order == -1)
                return COMPACT_CONTINUE;

        /*
         * fragmentation index determines if allocation failures are due to
         * low memory or external fragmentation
         *
         * index of -1 implies allocations might succeed depending on watermarks
         * index towards 0 implies failure is due to lack of memory
         * index towards 1000 implies failure is due to fragmentation
         *
         * Only compact if a failure would be due to fragmentation.
         */
        fragindex = fragmentation_index(zone, order);
        if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
                return COMPACT_SKIPPED;

        if (fragindex == -1 && zone_watermark_ok(zone, order, watermark, 0, 0))
                return COMPACT_PARTIAL;

        return COMPACT_CONTINUE;
}

static int compact_zone(struct zone *zone, struct compact_control *cc)
{
        int ret;

        ret = compaction_suitable(zone, cc->order);
        switch (ret) {
        case COMPACT_PARTIAL:
        case COMPACT_SKIPPED:
                /* Compaction is likely to fail */
                return ret;
        case COMPACT_CONTINUE:
                /* Fall through to compaction */
                ;
        }

        /* Setup to move all movable pages to the end of the zone */
        cc->migrate_pfn = zone->zone_start_pfn;
        cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
        cc->free_pfn &= ~(pageblock_nr_pages-1);

        migrate_prep_local();

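        /*
         * Main loop: repeatedly isolate a batch of pages and migrate it
         * until the scanners meet or a suitably large page becomes free.
         */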
        while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
                unsigned long nr_migrate, nr_remaining;
                int err;

                if (!isolate_migratepages(zone, cc))
                        continue;

                nr_migrate = cc->nr_migratepages;
                err = migrate_pages(&cc->migratepages, compaction_alloc,
                                (unsigned long)cc, false,
                                cc->sync);
                update_nr_listpages(cc);
                nr_remaining = cc->nr_migratepages;

                count_vm_event(COMPACTBLOCKS);
                count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
                if (nr_remaining)
                        count_vm_events(COMPACTPAGEFAILED, nr_remaining);
                trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
                                                nr_remaining);

                /* Release LRU pages not migrated */
                if (err) {
                        putback_lru_pages(&cc->migratepages);
                        cc->nr_migratepages = 0;
                }
        }

        /* Release free pages and check accounting */
        cc->nr_freepages -= release_freepages(&cc->freepages);
        VM_BUG_ON(cc->nr_freepages != 0);

        return ret;
}

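/* Prepare a compact_control and compact a single zone for a direct compactor */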
unsigned long compact_zone_order(struct zone *zone,
                                int order, gfp_t gfp_mask,
                                bool sync)
{
        struct compact_control cc = {
                .nr_freepages = 0,
                .nr_migratepages = 0,
                .order = order,
                .migratetype = allocflags_to_migratetype(gfp_mask),
                .zone = zone,
                .sync = sync,
        };
        INIT_LIST_HEAD(&cc.freepages);
        INIT_LIST_HEAD(&cc.migratepages);

        return compact_zone(zone, &cc);
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 * @sync: Whether migration is synchronous or not
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
                        int order, gfp_t gfp_mask, nodemask_t *nodemask,
                        bool sync)
{
        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
        int may_enter_fs = gfp_mask & __GFP_FS;
        int may_perform_io = gfp_mask & __GFP_IO;
        struct zoneref *z;
        struct zone *zone;
        int rc = COMPACT_SKIPPED;

        /*
         * Check whether it is worth even starting compaction. The order check
         * is made because we assume the page allocator can satisfy the
         * "cheaper" orders without taking special steps.
         */
        if (!order || !may_enter_fs || !may_perform_io)
                return rc;

        count_vm_event(COMPACTSTALL);

        /* Compact each zone in the list */
        for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
                                                                nodemask) {
                int status;

                status = compact_zone_order(zone, order, gfp_mask, sync);
                rc = max(status, rc);

                /* If a normal allocation would succeed, stop compacting */
                if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
                        break;
        }

        return rc;
}

/* Compact all zones within a node */
static int compact_node(int nid)
{
        int zoneid;
        pg_data_t *pgdat;
        struct zone *zone;

        if (nid < 0 || nid >= nr_node_ids || !node_online(nid))
                return -EINVAL;
        pgdat = NODE_DATA(nid);

        /* Flush pending updates to the LRU lists */
        lru_add_drain_all();

        for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
                struct compact_control cc = {
                        .nr_freepages = 0,
                        .nr_migratepages = 0,
                        .order = -1,
                };

                zone = &pgdat->node_zones[zoneid];
                if (!populated_zone(zone))
                        continue;

                cc.zone = zone;
                INIT_LIST_HEAD(&cc.freepages);
                INIT_LIST_HEAD(&cc.migratepages);

                compact_zone(zone, &cc);

                VM_BUG_ON(!list_empty(&cc.freepages));
                VM_BUG_ON(!list_empty(&cc.migratepages));
        }

        return 0;
}

/* Compact all nodes in the system */
static int compact_nodes(void)
{
        int nid;

        for_each_online_node(nid)
                compact_node(nid);

        return COMPACT_COMPLETE;
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
                        void __user *buffer, size_t *length, loff_t *ppos)
{
        if (write)
                return compact_nodes();

        return 0;
}

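/* This is the entry point for tuning extfrag_threshold via /proc/sys/vm */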
int sysctl_extfrag_handler(struct ctl_table *table, int write,
                        void __user *buffer, size_t *length, loff_t *ppos)
{
        proc_dointvec_minmax(table, write, buffer, length, ppos);

        return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
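/* Allow userspace to compact a single node's zones by writing to sysfs */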
ssize_t sysfs_compact_node(struct sys_device *dev,
                        struct sysdev_attribute *attr,
                        const char *buf, size_t count)
{
        compact_node(dev->id);

        return count;
}
static SYSDEV_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
        return sysdev_create_file(&node->sysdev, &attr_compact);
}

void compaction_unregister_node(struct node *node)
{
        return sysdev_remove_file(&node->sysdev, &attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */