diff --git a/mm/compaction.c b/mm/compaction.c
index 8f211bd2ea0d53e7731637555d18eea9e83bd3d7..546e571e9d60d1df57155833ab0e2d6afcb33ac0 100644
@@ -1158,6 +1158,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
        unsigned long end_pfn = zone_end_pfn(zone);
        const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
        const bool sync = cc->mode != MIGRATE_ASYNC;
+       unsigned long last_migrated_pfn = 0;
 
        ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
                                                        cc->classzone_idx);
@@ -1203,6 +1204,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
        while ((ret = compact_finished(zone, cc, migratetype)) ==
                                                COMPACT_CONTINUE) {
                int err;
+               unsigned long isolate_start_pfn = cc->migrate_pfn;
 
                switch (isolate_migratepages(zone, cc)) {
                case ISOLATE_ABORT:
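
The hunk above snapshots cc->migrate_pfn before isolate_migratepages() runs: that call advances the migration scanner as a side effect, so the pre-call value is the lowest pfn this iteration could have isolated and migrated from. A minimal userspace sketch of the snapshot pattern (scan() and the pfn values are made-up stand-ins, not kernel code):

        #include <stdio.h>

        static unsigned long cursor = 0x4000;   /* plays the role of cc->migrate_pfn */

        /* Stand-in for isolate_migratepages(): advances the cursor
         * past the range it scanned. */
        static void scan(void)
        {
                cursor += 0x200;
        }

        int main(void)
        {
                unsigned long isolate_start_pfn = cursor;   /* snapshot first */

                scan();
                printf("scanned [%#lx, %#lx); lowest pfn possibly freed: %#lx\n",
                       isolate_start_pfn, cursor, isolate_start_pfn);
                return 0;
        }
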
@@ -1211,7 +1213,12 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                        cc->nr_migratepages = 0;
                        goto out;
                case ISOLATE_NONE:
-                       continue;
+                       /*
+                        * We haven't isolated and migrated anything, but
+                        * there might still be unflushed migrations from
+                        * the previous cc->order aligned block.
+                        */
+                       goto check_drain;
                case ISOLATE_SUCCESS:
                        ;
                }
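
With ISOLATE_NONE jumping to check_drain instead of continuing, every pass through the loop body now reaches the drain check added below, even when nothing new was isolated. A condensed, compilable sketch of the resulting loop shape (the enum values mirror the kernel's, but the scripted results and the helpers are assumptions for illustration only):

        #include <stdio.h>

        enum { ISOLATE_ABORT, ISOLATE_NONE, ISOLATE_SUCCESS };

        /* Scripted isolation results for three loop iterations. */
        static const int script[] = { ISOLATE_SUCCESS, ISOLATE_NONE, ISOLATE_ABORT };

        int main(void)
        {
                unsigned long migrate_pfn = 0x1000;
                unsigned long last_migrated_pfn = 0;

                for (int i = 0; i < 3; i++) {
                        unsigned long isolate_start_pfn = migrate_pfn;

                        switch (script[i]) {
                        case ISOLATE_ABORT:
                                goto out;
                        case ISOLATE_NONE:
                                /* Nothing isolated, but a drain may still be pending. */
                                goto check_drain;
                        case ISOLATE_SUCCESS:
                                break;
                        }

                        migrate_pfn += 0x200;   /* pretend this iteration migrated pages */
                        if (!last_migrated_pfn)
                                last_migrated_pfn = isolate_start_pfn;

        check_drain:
                        if (last_migrated_pfn)
                                printf("iteration %d: unflushed frees since pfn %#lx\n",
                                       i, last_migrated_pfn);
                }
        out:
                return 0;
        }

In the kernel the ISOLATE_SUCCESS arm falls through with an empty statement; the break here is just the userspace-idiomatic equivalent.
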
@@ -1236,6 +1243,40 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                                goto out;
                        }
                }
+
+               /*
+                * Record where we could have freed pages by migration and not
+                * yet flushed them to the buddy allocator. We use the pfn that
+                * isolate_migratepages() started from in this loop iteration
+                * - this is the lowest page that could have been isolated and
+                * then freed by migration.
+                */
+               if (!last_migrated_pfn)
+                       last_migrated_pfn = isolate_start_pfn;
+
+check_drain:
+               /*
+                * Has the migration scanner moved away from the previous
+                * cc->order aligned block where we migrated from? If yes,
+                * flush the pages that were freed, so that they can merge and
+                * compact_finished() can detect immediately if allocation
+                * would succeed.
+                */
+               if (cc->order > 0 && last_migrated_pfn) {
+                       int cpu;
+                       unsigned long current_block_start =
+                               cc->migrate_pfn & ~((1UL << cc->order) - 1);
+
+                       if (last_migrated_pfn < current_block_start) {
+                               cpu = get_cpu();
+                               lru_add_drain_cpu(cpu);
+                               drain_local_pages(zone);
+                               put_cpu();
+                               /* No more flushing until we migrate again */
+                               last_migrated_pfn = 0;
+                       }
+               }
+
        }
 
 out:
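
The drain fires only after the migration scanner has moved past the cc->order aligned block in which the last un-drained migration started; while the scanner is still inside that block, more frees may follow, so flushing early would just be repeated work. Below is a small userspace sketch of the trigger arithmetic (the order and pfn values are made-up examples, not taken from the patch):

        #include <stdio.h>

        /* Round pfn down to the start of its 2^order aligned block,
         * mirroring: cc->migrate_pfn & ~((1UL << cc->order) - 1)
         */
        static unsigned long block_start(unsigned long pfn, int order)
        {
                return pfn & ~((1UL << order) - 1);
        }

        int main(void)
        {
                int order = 9;  /* e.g. a 2MB huge page with 4KB base pages */
                unsigned long last_migrated_pfn = 0x12345;
                unsigned long scanner_pfn[] = { 0x12360, 0x12400, 0x12600 };

                for (int i = 0; i < 3; i++) {
                        unsigned long cur = block_start(scanner_pfn[i], order);

                        printf("migrate_pfn=%#lx block=%#lx -> %s\n",
                               scanner_pfn[i], cur,
                               last_migrated_pfn < cur ? "drain" : "no drain");
                }
                return 0;
        }

Once a drain happens, the patch resets last_migrated_pfn to 0, so the lru pagevecs and per-cpu page lists are not flushed again until another migration completes.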