/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include "internal.h"

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}

static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}
}

static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

static inline bool should_release_lock(spinlock_t *lock)
{
	return need_resched() || spin_is_contended(lock);
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. Check if the process needs to be scheduled or
 * if the lock is contended. For async compaction, back out in the event
 * of severe contention. For sync compaction, schedule.
 *
 * Returns true if the lock is held.
 * Returns false if the lock is released and compaction should abort
 */
static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
				      bool locked, struct compact_control *cc)
{
	if (should_release_lock(lock)) {
		if (locked) {
			spin_unlock_irqrestore(lock, *flags);
			locked = false;
		}

		/* async aborts if taking too long or contended */
		if (!cc->sync) {
			cc->contended = true;
			return false;
		}

		cond_resched();
	}

	if (!locked)
		spin_lock_irqsave(lock, *flags);
	return true;
}

static inline bool compact_trylock_irqsave(spinlock_t *lock,
			unsigned long *flags, struct compact_control *cc)
{
	return compact_checklock_irqsave(lock, flags, false, cc);
}

static void compact_capture_page(struct compact_control *cc)
{
	unsigned long flags;
	int mtype, mtype_low, mtype_high;

	if (!cc->page || *cc->page)
		return;

	/*
	 * For MIGRATE_MOVABLE allocations we capture a suitable page ASAP
	 * regardless of the migratetype of the freelist it is captured from.
	 * This is fine because the order for a high-order MIGRATE_MOVABLE
	 * allocation is typically at least a pageblock size and overall
	 * fragmentation is not impaired. Other allocation types must
	 * capture pages from their own migratelist because otherwise they
	 * could pollute other pageblocks like MIGRATE_MOVABLE with
	 * difficult to move pages and make fragmentation worse overall.
	 */
	if (cc->migratetype == MIGRATE_MOVABLE) {
		mtype_low = 0;
		mtype_high = MIGRATE_PCPTYPES;
	} else {
		mtype_low = cc->migratetype;
		mtype_high = cc->migratetype + 1;
	}

	/* Speculatively examine the free lists without zone lock */
	for (mtype = mtype_low; mtype < mtype_high; mtype++) {
		int order;
		for (order = cc->order; order < MAX_ORDER; order++) {
			struct page *page;
			struct free_area *area;
			area = &(cc->zone->free_area[order]);
			if (list_empty(&area->free_list[mtype]))
				continue;

			/* Take the lock and attempt capture of the page */
			if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc))
				return;
			if (!list_empty(&area->free_list[mtype])) {
				page = list_entry(area->free_list[mtype].next,
							struct page, lru);
				if (capture_free_page(page, cc->order, mtype)) {
					spin_unlock_irqrestore(&cc->zone->lock,
									flags);
					*cc->page = page;
					return;
				}
			}
			spin_unlock_irqrestore(&cc->zone->lock, flags);
		}
	}
}

/*
 * Isolate free pages onto a private freelist. Caller must hold zone->lock.
 * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
 * pages inside of the pageblock (even though it may still end up isolating
 * some pages).
 */
static unsigned long isolate_freepages_block(unsigned long blockpfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. This assumes the block is valid */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		if (!pfn_valid_within(blockpfn)) {
			if (strict)
				return 0;
			continue;
		}
		nr_scanned++;

		if (!PageBuddy(page)) {
			if (strict)
				return 0;
			continue;
		}

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		if (!isolated && strict)
			return 0;
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
		}
	}

	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, cause function to
 * undo its actions and return zero.
 *
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in a middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_end_pfn, flags;
	struct zone *zone = NULL;
	LIST_HEAD(freelist);

	if (pfn_valid(start_pfn))
		zone = page_zone(pfn_to_page(start_pfn));

	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
		if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn)))
			break;

		/*
		 * On subsequent iterations ALIGN() is actually not needed,
		 * but we keep it so as not to complicate the code.
		 */
		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		spin_lock_irqsave(&zone->lock, flags);
		isolated = isolate_freepages_block(pfn, block_end_pfn,
						   &freelist, true);
		spin_unlock_irqrestore(&zone->lock, flags);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks.)
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	/* If locked we can use the interrupt unsafe versions */
	if (locked) {
		__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	} else {
		mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	}
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}

/**
 * isolate_migratepages_range() - isolate all migrate-able pages in range.
 * @zone:	Zone pages are in.
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN of the range.
 * @end_pfn:	The one-past-the-last PFN of the range.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). Returns zero if there is a fatal signal
 * pending, otherwise PFN of the first page that was not scanned
 * (which may be less than, equal to or greater than end_pfn).
 *
 * Assumes that cc->migratepages is empty and cc->nr_migratepages is
 * zero.
 *
 * Apart from cc->migratepages and cc->nr_migratepages this function
 * does not modify any cc's fields, in particular it does not modify
 * (or read for that matter) cc->migrate_pfn.
 */
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn)
{
	unsigned long last_pageblock_nr = 0, pageblock_nr;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	isolate_mode_t mode = 0;
	struct lruvec *lruvec;
	unsigned long flags;
	bool locked = false;
	struct page *page = NULL;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (!cc->sync)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {
		/* give a chance to irqs before checking need_resched() */
		if (locked && !((low_pfn+1) % SWAP_CLUSTER_MAX)) {
			if (should_release_lock(&zone->lru_lock)) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				locked = false;
			}
		}

		/*
		 * migrate_pfn does not necessarily start aligned to a
		 * pageblock. Ensure that pfn_valid is called when moving
		 * into a new MAX_ORDER_NR_PAGES range in case of large
		 * memory holes within the zone
		 */
		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
			if (!pfn_valid(low_pfn)) {
				low_pfn += MAX_ORDER_NR_PAGES - 1;
				continue;
			}
		}

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		/*
		 * Get the page and ensure the page is within the same zone.
		 * See the comment in isolate_freepages about overlapping
		 * nodes. It is deliberate that the new zone lock is not taken
		 * as memory compaction should not move pages between nodes.
		 */
		page = pfn_to_page(low_pfn);
		if (page_zone(page) != zone)
			continue;

		/* Skip if the page is free */
		if (PageBuddy(page))
			continue;

		/*
		 * For async migration, also only scan in MOVABLE blocks. Async
		 * migration is optimistic to see if the minimum amount of work
		 * satisfies the allocation
		 */
		pageblock_nr = low_pfn >> pageblock_order;
		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
		    !migrate_async_suitable(get_pageblock_migratetype(page))) {
			goto next_pageblock;
		}

		/* Check may be lockless but that's ok as we recheck later */
		if (!PageLRU(page))
			continue;

		/*
		 * PageLRU is set. lru_lock normally excludes isolation
		 * splitting and collapsing (collapsing has already happened
		 * if PageLRU is set) but the lock is not necessarily taken
		 * here and it is wasteful to take it just to check transhuge.
		 * Check TransHuge without lock and skip the whole pageblock if
		 * it's either a transhuge or hugetlbfs page, as calling
		 * compound_order() without preventing THP from splitting the
		 * page underneath us may return surprising results.
		 */
		if (PageTransHuge(page)) {
			if (!locked)
				goto next_pageblock;
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		/* Check if it is ok to still hold the lock */
		locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
								locked, cc);
		if (!locked || fatal_signal_pending(current))
			break;

		/* Recheck PageLRU and PageTransHuge under lock */
		if (!PageLRU(page))
			continue;
		if (PageTransHuge(page)) {
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		if (!cc->sync)
			mode |= ISOLATE_ASYNC_MIGRATE;

		lruvec = mem_cgroup_page_lruvec(page, zone);

		/* Try isolate the page */
		if (__isolate_lru_page(page, mode) != 0)
			continue;

		VM_BUG_ON(PageTransCompound(page));

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}

		continue;

next_pageblock:
		low_pfn += pageblock_nr_pages;
		low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
		last_pageblock_nr = pageblock_nr;
	}

	acct_isolated(zone, locked, cc);

	if (locked)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

	return low_pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	int migratetype = get_pageblock_migratetype(page);

	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
		return false;

	/* If the page is a large free page, then allow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return true;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(migratetype))
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Returns the start pfn of the last page block in a zone. This is the starting
 * point for full compaction of a zone. Compaction searches for free pages from
 * the end of each zone, while isolate_freepages_block scans forward inside each
 * page block.
 */
static unsigned long start_free_pfn(struct zone *zone)
{
	unsigned long free_pfn;
	free_pfn = zone->zone_start_pfn + zone->spanned_pages;
	free_pfn &= ~(pageblock_nr_pages-1);
	return free_pfn;
}
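/*
 * Worked example (illustration only, not from the original source): with a
 * typical pageblock_nr_pages of 512, a zone whose one-past-end PFN is
 * 1000000 gives 1000000 & ~511 == 999936, i.e. the start of the last
 * (possibly partial) pageblock in the zone.
 */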

/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
				struct compact_control *cc)
{
	struct page *page;
	unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
	unsigned long flags;
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * scanned from (or the end of the zone if starting). The low point
	 * is the end of the pageblock the migration scanner is using.
	 */
	pfn = cc->free_pfn;
	low_pfn = cc->migrate_pfn + pageblock_nr_pages;

	/*
	 * Take care that if the migration scanner is at the end of the zone
	 * that the free scanner does not accidentally move to the next zone
	 * in the next isolation cycle.
	 */
	high_pfn = min(low_pfn, pfn);

	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
					pfn -= pageblock_nr_pages) {
		unsigned long isolated;

		if (!pfn_valid(pfn))
			continue;

		/*
		 * Check for overlapping nodes/zones. It's possible on some
		 * configurations to have a setup like node0 node1 node0
		 * i.e. it's possible that all pages within a zones range of
		 * pages do not belong to a single zone.
		 */
		page = pfn_to_page(pfn);
		if (page_zone(page) != zone)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/*
		 * Found a block suitable for isolating free pages from. Once
		 * the zone lock is taken below and interrupts are disabled,
		 * double check things are ok and isolate the pages. This is
		 * to minimise the time IRQs are disabled.
		 */
		isolated = 0;

		/*
		 * The zone lock must be held to isolate freepages.
		 * Unfortunately this is a very coarse lock and can be
		 * heavily contended if there are parallel allocations
		 * or parallel compactions. For async compaction do not
		 * spin on the lock.
		 */
		if (!compact_trylock_irqsave(&zone->lock, &flags, cc))
			break;
		if (suitable_migration_target(page)) {
			end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
			isolated = isolate_freepages_block(pfn, end_pfn,
							   freelist, false);
			nr_freepages += isolated;
		}
		spin_unlock_irqrestore(&zone->lock, flags);

		/*
		 * Record the highest PFN we isolated pages from. When next
		 * looking for free pages, the search will restart here as
		 * page migration may have returned some pages to the allocator
		 */
		if (isolated) {
			high_pfn = max(high_pfn, pfn);

			/*
			 * If the free scanner has wrapped, update
			 * compact_cached_free_pfn to point to the highest
			 * pageblock with free pages. This reduces excessive
			 * scanning of full pageblocks near the end of the
			 * zone
			 */
			if (cc->order > 0 && cc->wrapped)
				zone->compact_cached_free_pfn = high_pfn;
		}
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	cc->free_pfn = high_pfn;
	cc->nr_freepages = nr_freepages;

	/* If compact_cached_free_pfn is reset then set it now */
	if (cc->order > 0 && !cc->wrapped &&
			zone->compact_cached_free_pfn == start_free_pfn(zone))
		zone->compact_cached_free_pfn = high_pfn;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/* Isolate free pages if necessary */
	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc->zone, cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. When
 * migration is complete, we count the number of pages on the lists by hand.
 */
static void update_nr_listpages(struct compact_control *cc)
{
	int nr_migratepages = 0;
	int nr_freepages = 0;
	struct page *page;

	list_for_each_entry(page, &cc->migratepages, lru)
		nr_migratepages++;
	list_for_each_entry(page, &cc->freepages, lru)
		nr_freepages++;

	cc->nr_migratepages = nr_migratepages;
	cc->nr_freepages = nr_freepages;
}

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;

	/* Do not scan outside zone boundaries */
	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);

	/* Do not cross the free scanner or scan within a memory hole */
	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
		cc->migrate_pfn = end_pfn;
		return ISOLATE_NONE;
	}

	/* Perform the isolation */
	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
	if (!low_pfn || cc->contended)
		return ISOLATE_ABORT;

	cc->migrate_pfn = low_pfn;

	return ISOLATE_SUCCESS;
}

static int compact_finished(struct zone *zone,
			    struct compact_control *cc)
{
	unsigned long watermark;

	if (fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/*
	 * A full (order == -1) compaction run starts at the beginning and
	 * end of a zone; it completes when the migrate and free scanner meet.
	 * A partial (order > 0) compaction can start with the free scanner
	 * at a random point in the zone, and may have to restart.
	 */
	if (cc->free_pfn <= cc->migrate_pfn) {
		if (cc->order > 0 && !cc->wrapped) {
			/* We started partway through; restart at the end. */
			unsigned long free_pfn = start_free_pfn(zone);
			zone->compact_cached_free_pfn = free_pfn;
			cc->free_pfn = free_pfn;
			cc->wrapped = 1;
			return COMPACT_CONTINUE;
		}
		return COMPACT_COMPLETE;
	}

	/* We wrapped around and ended up where we started. */
	if (cc->wrapped && cc->free_pfn <= cc->start_free_pfn)
		return COMPACT_COMPLETE;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);
	watermark += (1 << cc->order);

	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	if (cc->page) {
		/* Was a suitable page captured? */
		if (*cc->page)
			return COMPACT_PARTIAL;
	} else {
		unsigned int order;
		for (order = cc->order; order < MAX_ORDER; order++) {
			struct free_area *area = &zone->free_area[order];
			/* Job done if page is free of the right migratetype */
			if (!list_empty(&area->free_list[cc->migratetype]))
				return COMPACT_PARTIAL;

			/* Job done if allocation would set block type */
			if (cc->order >= pageblock_order && area->nr_free)
				return COMPACT_PARTIAL;
		}
	}

	return COMPACT_CONTINUE;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
	int fragindex;
	unsigned long watermark;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (order == -1)
		return COMPACT_CONTINUE;

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher
	 */
	watermark = low_wmark_pages(zone) + (2UL << order);
	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
		return COMPACT_SKIPPED;

	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 implies allocations might succeed depending on
	 * watermarks
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_SKIPPED;

	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
	    0, 0))
		return COMPACT_PARTIAL;

	return COMPACT_CONTINUE;
}
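/*
 * Worked example (illustration only, not from the original source): for an
 * order-3 request, the order-0 watermark check above requires
 * low_wmark_pages(zone) + (2UL << 3) == low watermark + 16 extra free pages,
 * reflecting the temporary double footprint while pages are being copied.
 */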

static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;

	ret = compaction_suitable(zone, cc->order);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/* Setup to move all movable pages to the end of the zone */
	cc->migrate_pfn = zone->zone_start_pfn;

	if (cc->order > 0) {
		/* Incremental compaction. Start where the last one stopped. */
		cc->free_pfn = zone->compact_cached_free_pfn;
		cc->start_free_pfn = cc->free_pfn;
	} else {
		/* Order == -1 starts at the end of the zone. */
		cc->free_pfn = start_free_pfn(zone);
	}

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
		unsigned long nr_migrate, nr_remaining;
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_PARTIAL;
			putback_lru_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			goto out;
		case ISOLATE_NONE:
			continue;
		case ISOLATE_SUCCESS:
			;
		}

		nr_migrate = cc->nr_migratepages;
		err = migrate_pages(&cc->migratepages, compaction_alloc,
				(unsigned long)cc, false,
				cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
		update_nr_listpages(cc);
		nr_remaining = cc->nr_migratepages;

		count_vm_event(COMPACTBLOCKS);
		count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
		if (nr_remaining)
			count_vm_events(COMPACTPAGEFAILED, nr_remaining);
		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
						nr_remaining);

		/* Release LRU pages not migrated */
		if (err) {
			putback_lru_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			if (err == -ENOMEM) {
				ret = COMPACT_PARTIAL;
				goto out;
			}
		}

		/* Capture a page now if it is a suitable size */
		compact_capture_page(cc);
	}

out:
	/* Release free pages and check accounting */
	cc->nr_freepages -= release_freepages(&cc->freepages);
	VM_BUG_ON(cc->nr_freepages != 0);

	return ret;
}

static unsigned long compact_zone_order(struct zone *zone,
				 int order, gfp_t gfp_mask,
				 bool sync, bool *contended,
				 struct page **page)
{
	int ret;
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.migratetype = allocflags_to_migratetype(gfp_mask),
		.zone = zone,
		.sync = sync,
		.page = page,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	ret = compact_zone(zone, &cc);

	VM_BUG_ON(!list_empty(&cc.freepages));
	VM_BUG_ON(!list_empty(&cc.migratepages));

	*contended = cc.contended;
	return ret;
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 * @sync: Whether migration is synchronous or not
 * @contended: Return value that is true if compaction was aborted due to lock contention
 * @page: Optionally capture a free page of the requested order during compaction
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync, bool *contended, struct page **page)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_SKIPPED;
	int alloc_flags = 0;

	/* Check if the GFP flags allow compaction */
	if (!order || !may_enter_fs || !may_perform_io)
		return rc;

	count_vm_event(COMPACTSTALL);

#ifdef CONFIG_CMA
	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;
#endif
	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
								nodemask) {
		int status;

		status = compact_zone_order(zone, order, gfp_mask, sync,
						contended, page);
		rc = max(status, rc);

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
				      alloc_flags))
			break;
	}

	return rc;
}

/* Compact all zones within a node */
static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		if (cc->order > 0) {
			int ok = zone_watermark_ok(zone, cc->order,
						low_wmark_pages(zone), 0, 0);
			if (ok && cc->order >= zone->compact_order_failed)
				zone->compact_order_failed = cc->order + 1;
			/* Currently async compaction is never deferred. */
			else if (!ok && cc->sync)
				defer_compaction(zone, cc->order);
		}

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));
	}

	return 0;
}

int compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.sync = false,
		.page = NULL,
	};

	return __compact_pgdat(pgdat, &cc);
}

static int compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.sync = true,
		.page = NULL,
	};

	return __compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static int compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);

	return COMPACT_COMPLETE;
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		return compact_nodes();

	return 0;
}
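/*
 * Usage note (not in the original source): writing any value to
 * /proc/sys/vm/compact_memory triggers compaction of every online node,
 * e.g. "echo 1 > /proc/sys/vm/compact_memory"; the value itself is ignored.
 */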

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
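/*
 * Usage note (not in the original source): with CONFIG_SYSFS and CONFIG_NUMA
 * enabled, a single node can be compacted from userspace through this
 * attribute, e.g. "echo 1 > /sys/devices/system/node/node0/compact".
 */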

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

#endif /* CONFIG_COMPACTION */