-rw-r--r--	mm/compaction.c	30	+++++++++++++++---------------
-rw-r--r--	mm/internal.h	1	+
2 files changed, 16 insertions, 15 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index 8f64d3533990..c5c627aae996 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1115,6 +1115,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 					struct compact_control *cc)
 {
 	unsigned long low_pfn, end_pfn;
+	unsigned long isolate_start_pfn;
 	struct page *page;
 	const isolate_mode_t isolate_mode =
 		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
@@ -1163,6 +1164,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 			continue;
 
 		/* Perform the isolation */
+		isolate_start_pfn = low_pfn;
 		low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn,
 								isolate_mode);
 
@@ -1172,6 +1174,15 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 		}
 
 		/*
+		 * Record where we could have freed pages by migration and not
+		 * yet flushed them to buddy allocator.
+		 * - this is the lowest page that could have been isolated and
+		 * then freed by migration.
+		 */
+		if (cc->nr_migratepages && !cc->last_migrated_pfn)
+			cc->last_migrated_pfn = isolate_start_pfn;
+
+		/*
 		 * Either we isolated something and proceed with migration. Or
 		 * we failed and compact_zone should decide if we should
 		 * continue or not.
@@ -1342,7 +1353,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 	unsigned long end_pfn = zone_end_pfn(zone);
 	const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
 	const bool sync = cc->mode != MIGRATE_ASYNC;
-	unsigned long last_migrated_pfn = 0;
 
 	ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
 							cc->classzone_idx);
@@ -1380,6 +1390,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 		zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
 		zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
 	}
+	cc->last_migrated_pfn = 0;
 
 	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
 				cc->free_pfn, end_pfn, sync);
@@ -1389,7 +1400,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 	while ((ret = compact_finished(zone, cc, migratetype)) ==
 						COMPACT_CONTINUE) {
 		int err;
-		unsigned long isolate_start_pfn = cc->migrate_pfn;
 
 		switch (isolate_migratepages(zone, cc)) {
 		case ISOLATE_ABORT:
@@ -1429,16 +1439,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 			}
 		}
 
-		/*
-		 * Record where we could have freed pages by migration and not
-		 * yet flushed them to buddy allocator. We use the pfn that
-		 * isolate_migratepages() started from in this loop iteration
-		 * - this is the lowest page that could have been isolated and
-		 * then freed by migration.
-		 */
-		if (!last_migrated_pfn)
-			last_migrated_pfn = isolate_start_pfn;
-
 check_drain:
 		/*
 		 * Has the migration scanner moved away from the previous
@@ -1447,18 +1447,18 @@ check_drain:
 		 * compact_finished() can detect immediately if allocation
 		 * would succeed.
 		 */
-		if (cc->order > 0 && last_migrated_pfn) {
+		if (cc->order > 0 && cc->last_migrated_pfn) {
 			int cpu;
 			unsigned long current_block_start =
 				cc->migrate_pfn & ~((1UL << cc->order) - 1);
 
-			if (last_migrated_pfn < current_block_start) {
+			if (cc->last_migrated_pfn < current_block_start) {
 				cpu = get_cpu();
 				lru_add_drain_cpu(cpu);
 				drain_local_pages(zone);
 				put_cpu();
 				/* No more flushing until we migrate again */
-				last_migrated_pfn = 0;
+				cc->last_migrated_pfn = 0;
 			}
 		}
 
diff --git a/mm/internal.h b/mm/internal.h
index 1195dd2d6a2b..bc0fa9a69e46 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -182,6 +182,7 @@ struct compact_control {
 	unsigned long nr_migratepages;	/* Number of pages to migrate */
 	unsigned long free_pfn;		/* isolate_freepages search base */
 	unsigned long migrate_pfn;	/* isolate_migratepages search base */
+	unsigned long last_migrated_pfn;/* Not yet flushed page being freed */
 	enum migrate_mode mode;		/* Async or sync migration mode */
 	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
 	int order;			/* order a direct compactor needs */
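The check_drain logic above hinges on one piece of arithmetic: rounding the migration scanner's pfn down to the start of its cc->order aligned block with "pfn & ~((1UL << order) - 1)", then draining per-cpu pages only once the scanner has moved past the block where the last batch of pages may have been freed. Below is a minimal userspace sketch of that heuristic; struct compact_state, should_drain() and the sample pfn values are hypothetical stand-ins for illustration, not kernel API — only the mask expression mirrors the patch.

#include <stdio.h>

/* Hypothetical stand-in for the relevant compact_control fields */
struct compact_state {
	unsigned long migrate_pfn;	/* migration scanner position */
	unsigned long last_migrated_pfn;/* lowest pfn possibly freed; 0 = none */
	int order;			/* allocation order being compacted for */
};

static int should_drain(const struct compact_state *cs)
{
	unsigned long current_block_start;

	if (cs->order <= 0 || !cs->last_migrated_pfn)
		return 0;

	/* Round the scanner position down to an order-aligned block start */
	current_block_start = cs->migrate_pfn & ~((1UL << cs->order) - 1);

	/* Drain only once the scanner has left the recorded block */
	return cs->last_migrated_pfn < current_block_start;
}

int main(void)
{
	struct compact_state cs = {
		.migrate_pfn = 0x1050,		/* scanner is in block 0x1040 */
		.last_migrated_pfn = 0x1005,	/* freed pages in block 0x1000 */
		.order = 6,			/* blocks of 64 pages */
	};

	/* 0x1050 & ~0x3f = 0x1040 > 0x1005, so a drain is due */
	printf("drain? %d\n", should_drain(&cs));
	return 0;
}

Note that the recording site moved into isolate_migratepages() also gates on cc->nr_migratepages, so the start pfn is only remembered when the isolation pass actually captured pages; after a successful drain the field is zeroed, so no further flushing happens until migration frees pages again.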