mm, compaction: add per-zone migration pfn cache for async compaction
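This change gives each zone two cached migration-scanner positions instead of one, indexed by cc->sync: slot 0 is where async compaction resumes, slot 1 where sync compaction resumes. Async compaction skips over pageblocks it cannot cheaply migrate, so letting it advance a single shared cache would make later sync runs start past pageblocks they never got to try. A minimal sketch of the struct zone change this implies; the real declaration lives in include/linux/mmzone.h, which is not part of this diff, so field order and comments here are assumptions:

	struct zone {
		/* ... */
	#ifdef CONFIG_COMPACTION
		/* Resume pfn for the compaction free scanner (unchanged). */
		unsigned long compact_cached_free_pfn;
		/* Resume pfns for the migration scanner, one per mode:
		 * [0] = async, [1] = sync. */
		unsigned long compact_cached_migrate_pfn[2];
	#endif
		/* ... */
	};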
diff --git a/mm/compaction.c b/mm/compaction.c
index d0c7c99..70c0f8c 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -89,7 +89,8 @@ static void __reset_isolation_suitable(struct zone *zone)
        unsigned long end_pfn = zone_end_pfn(zone);
        unsigned long pfn;
 
-       zone->compact_cached_migrate_pfn = start_pfn;
+       zone->compact_cached_migrate_pfn[0] = start_pfn;
+       zone->compact_cached_migrate_pfn[1] = start_pfn;
        zone->compact_cached_free_pfn = end_pfn;
        zone->compact_blockskip_flush = false;
 
@@ -131,9 +132,10 @@ void reset_isolation_suitable(pg_data_t *pgdat)
  */
 static void update_pageblock_skip(struct compact_control *cc,
                        struct page *page, unsigned long nr_isolated,
-                       bool migrate_scanner)
+                       bool set_unsuitable, bool migrate_scanner)
 {
        struct zone *zone = cc->zone;
+       unsigned long pfn;
 
        if (cc->ignore_skip_hint)
                return;
@@ -141,20 +143,31 @@ static void update_pageblock_skip(struct compact_control *cc,
        if (!page)
                return;
 
-       if (!nr_isolated) {
-               unsigned long pfn = page_to_pfn(page);
+       if (nr_isolated)
+               return;
+
+       /*
+        * Only skip pageblocks when all forms of compaction will be known to
+        * fail in the near future.
+        */
+       if (set_unsuitable)
                set_pageblock_skip(page);
 
-               /* Update where compaction should restart */
-               if (migrate_scanner) {
-                       if (!cc->finished_update_migrate &&
-                           pfn > zone->compact_cached_migrate_pfn)
-                               zone->compact_cached_migrate_pfn = pfn;
-               } else {
-                       if (!cc->finished_update_free &&
-                           pfn < zone->compact_cached_free_pfn)
-                               zone->compact_cached_free_pfn = pfn;
-               }
+       pfn = page_to_pfn(page);
+
+       /* Update where async and sync compaction should restart */
+       if (migrate_scanner) {
+               if (cc->finished_update_migrate)
+                       return;
+               if (pfn > zone->compact_cached_migrate_pfn[0])
+                       zone->compact_cached_migrate_pfn[0] = pfn;
+               if (cc->sync && pfn > zone->compact_cached_migrate_pfn[1])
+                       zone->compact_cached_migrate_pfn[1] = pfn;
+       } else {
+               if (cc->finished_update_free)
+                       return;
+               if (pfn < zone->compact_cached_free_pfn)
+                       zone->compact_cached_free_pfn = pfn;
        }
 }
 #else
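The update rule above is deliberately asymmetric: any pass advances the async slot, but only a sync pass (cc->sync) also advances the sync slot, because sync compaction considers every pageblock async would and more. A worked illustration with made-up pfn values, not part of the patch:

	/*
	 * Zone cache freshly reset to { 0, 0 }.
	 *
	 * 1. An async pass scans up to pfn 0x8000, skipping unsuitable
	 *    pageblocks along the way:
	 *        compact_cached_migrate_pfn[] == { 0x8000, 0 }
	 *    The next sync pass still starts at pfn 0 and can retry the
	 *    pageblocks async refused to touch.
	 *
	 * 2. A sync pass then scans up to pfn 0x8000:
	 *        compact_cached_migrate_pfn[] == { 0x8000, 0x8000 }
	 *    Both modes now resume past the range sync has fully covered.
	 */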
@@ -166,7 +179,7 @@ static inline bool isolation_suitable(struct compact_control *cc,
 
 static void update_pageblock_skip(struct compact_control *cc,
                        struct page *page, unsigned long nr_isolated,
-                       bool migrate_scanner)
+                       bool set_unsuitable, bool migrate_scanner)
 {
 }
 #endif /* CONFIG_COMPACTION */
@@ -323,7 +336,8 @@ isolate_fail:
 
        /* Update the pageblock-skip if the whole pageblock was scanned */
        if (blockpfn == end_pfn)
-               update_pageblock_skip(cc, valid_page, total_isolated, false);
+               update_pageblock_skip(cc, valid_page, total_isolated, true,
+                                     false);
 
        count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
        if (total_isolated)
@@ -458,7 +472,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
        unsigned long flags;
        bool locked = false;
        struct page *page = NULL, *valid_page = NULL;
-       bool skipped_async_unsuitable = false;
+       bool set_unsuitable = true;
        const isolate_mode_t mode = (!cc->sync ? ISOLATE_ASYNC_MIGRATE : 0) |
                                    (unevictable ? ISOLATE_UNEVICTABLE : 0);
 
@@ -535,8 +549,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                         */
                        mt = get_pageblock_migratetype(page);
                        if (!cc->sync && !migrate_async_suitable(mt)) {
-                               cc->finished_update_migrate = true;
-                               skipped_async_unsuitable = true;
+                               set_unsuitable = false;
                                goto next_pageblock;
                        }
                }
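Here async compaction declines pageblocks whose migratetype it cannot handle without blocking; with the per-mode cache this no longer needs the skipped_async_unsuitable special case and instead just asks update_pageblock_skip() not to set the skip bit. For reference, the gate it tests, as mm/compaction.c defined it around this commit (reproduced from memory for context, not changed by the patch):

	/* Returns true if the page is within a block suitable for migration to */
	static inline bool migrate_async_suitable(int migratetype)
	{
		return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
	}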
@@ -640,11 +653,10 @@ next_pageblock:
        /*
         * Update the pageblock-skip information and cached scanner pfn,
         * if the whole pageblock was scanned without isolating any page.
-        * This is not done when pageblock was skipped due to being unsuitable
-        * for async compaction, so that eventual sync compaction can try.
         */
-       if (low_pfn == end_pfn && !skipped_async_unsuitable)
-               update_pageblock_skip(cc, valid_page, nr_isolated, true);
+       if (low_pfn == end_pfn)
+               update_pageblock_skip(cc, valid_page, nr_isolated,
+                                     set_unsuitable, true);
 
        trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
 
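Before this patch, an async-unsuitable pageblock suppressed the skip update entirely so that sync compaction could still try it; now the call always happens, set_unsuitable only controls the skip bit, and the per-mode cache preserves the old guarantee. The two cases at this call site, sketched as an annotated summary rather than patch code:

	/*
	 * set_unsuitable == true:  the pageblock may get its skip bit set,
	 *                          so every mode avoids it until the skip
	 *                          information is next reset.
	 *
	 * set_unsuitable == false: the skip bit stays clear; the async
	 *                          resume pfn still advances past the block
	 *                          (finished_update_migrate is no longer
	 *                          forced here), but the sync resume pfn
	 *                          does not, so sync compaction will come
	 *                          back and try it.
	 */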
@@ -868,7 +880,8 @@ static int compact_finished(struct zone *zone,
        /* Compaction run completes if the migrate and free scanner meet */
        if (cc->free_pfn <= cc->migrate_pfn) {
                /* Let the next compaction start anew. */
-               zone->compact_cached_migrate_pfn = zone->zone_start_pfn;
+               zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
+               zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
                zone->compact_cached_free_pfn = zone_end_pfn(zone);
 
                /*
@@ -993,7 +1006,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
         * information on where the scanners should start but check that it
         * is initialised by ensuring the values are within zone boundaries.
         */
-       cc->migrate_pfn = zone->compact_cached_migrate_pfn;
+       cc->migrate_pfn = zone->compact_cached_migrate_pfn[cc->sync];
        cc->free_pfn = zone->compact_cached_free_pfn;
        if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
                cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
@@ -1001,7 +1014,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
        }
        if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
                cc->migrate_pfn = start_pfn;
-               zone->compact_cached_migrate_pfn = cc->migrate_pfn;
+               zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
+               zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
        }
 
        trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);
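To make the end-to-end behavior concrete, here is a small standalone program (illustrative only; the helper names are made up, and only the two update statements mirror the kernel logic) that simulates how the two slots diverge across async passes and reconverge after a sync pass:

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy model of the per-mode cache: [0] = async, [1] = sync. */
	static unsigned long cached_migrate_pfn[2];

	/* Record how far a pass scanned, mimicking update_pageblock_skip():
	 * every pass advances the async slot, only sync advances its own. */
	static void update_cache(bool sync, unsigned long pfn)
	{
		if (pfn > cached_migrate_pfn[0])
			cached_migrate_pfn[0] = pfn;
		if (sync && pfn > cached_migrate_pfn[1])
			cached_migrate_pfn[1] = pfn;
	}

	/* Where a pass resumes, mimicking compact_zone()'s cc->sync index. */
	static unsigned long resume_pfn(bool sync)
	{
		return cached_migrate_pfn[sync];
	}

	int main(void)
	{
		update_cache(false, 0x4000);	/* async pass scanned to 0x4000 */
		update_cache(false, 0x8000);	/* another async pass, to 0x8000 */
		printf("async %#lx, sync %#lx\n",
		       resume_pfn(false), resume_pfn(true));
		/* prints: async 0x8000, sync 0 */

		update_cache(true, 0x8000);	/* sync pass covers the same range */
		printf("async %#lx, sync %#lx\n",
		       resume_pfn(false), resume_pfn(true));
		/* prints: async 0x8000, sync 0x8000 */
		return 0;
	}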