Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
diff --git a/mm/compaction.c b/mm/compaction.c
index db76361..2c4ce17 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -50,6 +50,106 @@ static inline bool migrate_async_suitable(int migratetype)
        return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
 }
 
+#ifdef CONFIG_COMPACTION
+/* Returns true if the pageblock should be scanned for pages to isolate. */
+static inline bool isolation_suitable(struct compact_control *cc,
+                                       struct page *page)
+{
+       if (cc->ignore_skip_hint)
+               return true;
+
+       return !get_pageblock_skip(page);
+}
+
+/*
+ * This function is called to clear all cached information on pageblocks that
+ * should be skipped for page isolation when the migrate and free page
+ * scanners meet.
+ */
+static void __reset_isolation_suitable(struct zone *zone)
+{
+       unsigned long start_pfn = zone->zone_start_pfn;
+       unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+       unsigned long pfn;
+
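+       /* Restart both scanners from the zone boundaries */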
+       zone->compact_cached_migrate_pfn = start_pfn;
+       zone->compact_cached_free_pfn = end_pfn;
+       zone->compact_blockskip_flush = false;
+
+       /* Walk the zone and mark every pageblock as suitable for isolation */
+       for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
+               struct page *page;
+
+               cond_resched();
+
+               if (!pfn_valid(pfn))
+                       continue;
+
+               page = pfn_to_page(pfn);
+               if (zone != page_zone(page))
+                       continue;
+
+               clear_pageblock_skip(page);
+       }
+}
+
+void reset_isolation_suitable(pg_data_t *pgdat)
+{
+       int zoneid;
+
+       for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
+               struct zone *zone = &pgdat->node_zones[zoneid];
+               if (!populated_zone(zone))
+                       continue;
+
+               /* Only flush if a full compaction finished recently */
+               if (zone->compact_blockskip_flush)
+                       __reset_isolation_suitable(zone);
+       }
+}
+
+/*
+ * If no pages were isolated then mark this pageblock to be skipped in the
+ * future. The information is later cleared by __reset_isolation_suitable().
+ */
+static void update_pageblock_skip(struct compact_control *cc,
+                       struct page *page, unsigned long nr_isolated,
+                       bool migrate_scanner)
+{
+       struct zone *zone = cc->zone;
+       if (!page)
+               return;
+
+       if (!nr_isolated) {
+               unsigned long pfn = page_to_pfn(page);
+               set_pageblock_skip(page);
+
+               /* Update where compaction should restart */
+               if (migrate_scanner) {
+                       if (!cc->finished_update_migrate &&
+                           pfn > zone->compact_cached_migrate_pfn)
+                               zone->compact_cached_migrate_pfn = pfn;
+               } else {
+                       if (!cc->finished_update_free &&
+                           pfn < zone->compact_cached_free_pfn)
+                               zone->compact_cached_free_pfn = pfn;
+               }
+       }
+}
+#else
+static inline bool isolation_suitable(struct compact_control *cc,
+                                       struct page *page)
+{
+       return true;
+}
+
+static void update_pageblock_skip(struct compact_control *cc,
+                       struct page *page, unsigned long nr_isolated,
+                       bool migrate_scanner)
+{
+}
+#endif /* CONFIG_COMPACTION */
+
 static inline bool should_release_lock(spinlock_t *lock)
 {
        return need_resched() || spin_is_contended(lock);
@@ -181,7 +281,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
                                bool strict)
 {
        int nr_scanned = 0, total_isolated = 0;
-       struct page *cursor;
+       struct page *cursor, *valid_page = NULL;
        unsigned long nr_strict_required = end_pfn - blockpfn;
        unsigned long flags;
        bool locked = false;
@@ -196,6 +296,8 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
                nr_scanned++;
                if (!pfn_valid_within(blockpfn))
                        continue;
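+               /* Record the first valid page for the pageblock-skip update */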
+               if (!valid_page)
+                       valid_page = page;
                if (!PageBuddy(page))
                        continue;
 
@@ -250,6 +352,10 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
        if (locked)
                spin_unlock_irqrestore(&cc->zone->lock, flags);
 
+       /* Update the pageblock-skip if the whole pageblock was scanned */
+       if (blockpfn == end_pfn)
+               update_pageblock_skip(cc, valid_page, total_isolated, false);
+
        return total_isolated;
 }
 
@@ -267,22 +373,14 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
  * a free page).
  */
 unsigned long
-isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
+isolate_freepages_range(struct compact_control *cc,
+                       unsigned long start_pfn, unsigned long end_pfn)
 {
        unsigned long isolated, pfn, block_end_pfn;
-       struct zone *zone = NULL;
        LIST_HEAD(freelist);
 
-       /* cc needed for isolate_freepages_block to acquire zone->lock */
-       struct compact_control cc = {
-               .sync = true,
-       };
-
-       if (pfn_valid(start_pfn))
-               cc.zone = zone = page_zone(pfn_to_page(start_pfn));
-
        for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
-               if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn)))
+               if (!pfn_valid(pfn) || cc->zone != page_zone(pfn_to_page(pfn)))
                        break;
 
                /*
@@ -292,7 +390,7 @@ isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
                block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
                block_end_pfn = min(block_end_pfn, end_pfn);
 
-               isolated = isolate_freepages_block(&cc, pfn, block_end_pfn,
+               isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
                                                   &freelist, true);
 
                /*
@@ -363,6 +461,7 @@ static bool too_many_isolated(struct zone *zone)
  * @cc:                Compaction control structure.
  * @low_pfn:   The first PFN of the range.
  * @end_pfn:   The one-past-the-last PFN of the range.
+ * @unevictable: true if unevictable pages may be isolated
  *
  * Isolate all pages that can be migrated from the range specified by
  * [low_pfn, end_pfn).  Returns zero if there is a fatal signal
@@ -378,7 +477,7 @@ static bool too_many_isolated(struct zone *zone)
  */
 unsigned long
 isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
-                          unsigned long low_pfn, unsigned long end_pfn)
+               unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
 {
        unsigned long last_pageblock_nr = 0, pageblock_nr;
        unsigned long nr_scanned = 0, nr_isolated = 0;
@@ -387,6 +486,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
        struct lruvec *lruvec;
        unsigned long flags;
        bool locked = false;
+       struct page *page = NULL, *valid_page = NULL;
 
        /*
         * Ensure that there are not too many pages isolated from the LRU
@@ -407,8 +507,6 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
        /* Time to isolate some pages for migration */
        cond_resched();
        for (; low_pfn < end_pfn; low_pfn++) {
-               struct page *page;
-
                /* give a chance to irqs before checking need_resched() */
                if (locked && !((low_pfn+1) % SWAP_CLUSTER_MAX)) {
                        if (should_release_lock(&zone->lru_lock)) {
@@ -444,6 +542,14 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                if (page_zone(page) != zone)
                        continue;
 
+               if (!valid_page)
+                       valid_page = page;
+
+               /* If isolation recently failed, do not retry */
+               pageblock_nr = low_pfn >> pageblock_order;
+               if (!isolation_suitable(cc, page))
+                       goto next_pageblock;
+
                /* Skip if free */
                if (PageBuddy(page))
                        continue;
@@ -453,9 +559,9 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                 * migration is optimistic to see if the minimum amount of work
                 * satisfies the allocation
                 */
-               pageblock_nr = low_pfn >> pageblock_order;
                if (!cc->sync && last_pageblock_nr != pageblock_nr &&
                    !migrate_async_suitable(get_pageblock_migratetype(page))) {
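+                       /* Async skip: stop caching the migrate restart pfn */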
+                       cc->finished_update_migrate = true;
                        goto next_pageblock;
                }
 
@@ -497,6 +603,9 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                if (!cc->sync)
                        mode |= ISOLATE_ASYNC_MIGRATE;
 
+               if (unevictable)
+                       mode |= ISOLATE_UNEVICTABLE;
+
                lruvec = mem_cgroup_page_lruvec(page, zone);
 
                /* Try isolate the page */
@@ -506,6 +615,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                VM_BUG_ON(PageTransCompound(page));
 
                /* Successfully isolated */
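+               /* Page may be put back later; stop caching the restart pfn */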
+               cc->finished_update_migrate = true;
                del_page_from_lru_list(page, lruvec, page_lru(page));
                list_add(&page->lru, migratelist);
                cc->nr_migratepages++;
@@ -530,6 +640,10 @@ next_pageblock:
        if (locked)
                spin_unlock_irqrestore(&zone->lru_lock, flags);
 
+       /* Update the pageblock-skip if the whole pageblock was scanned */
+       if (low_pfn == end_pfn)
+               update_pageblock_skip(cc, valid_page, nr_isolated, true);
+
        trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
 
        return low_pfn;
@@ -593,6 +707,10 @@ static void isolate_freepages(struct zone *zone,
                if (!suitable_migration_target(page))
                        continue;
 
+               /* If isolation recently failed, do not retry */
+               if (!isolation_suitable(cc, page))
+                       continue;
+
                /* Found a block suitable for isolating free pages from */
                isolated = 0;
                end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
@@ -605,8 +723,10 @@ static void isolate_freepages(struct zone *zone,
                 * looking for free pages, the search will restart here as
                 * page migration may have returned some pages to the allocator
                 */
-               if (isolated)
+               if (isolated) {
+                       cc->finished_update_free = true;
                        high_pfn = max(high_pfn, pfn);
+               }
        }
 
        /* split_free_page does not map the pages */
@@ -691,7 +811,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
        }
 
        /* Perform the isolation */
-       low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
+       low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
        if (!low_pfn || cc->contended)
                return ISOLATE_ABORT;
 
@@ -709,8 +829,18 @@ static int compact_finished(struct zone *zone,
                return COMPACT_PARTIAL;
 
        /* Compaction run completes if the migrate and free scanner meet */
-       if (cc->free_pfn <= cc->migrate_pfn)
+       if (cc->free_pfn <= cc->migrate_pfn) {
+               /*
+                * Mark that the PG_migrate_skip information should be cleared
+                * by kswapd when it goes to sleep. kswapd does not set the
+                * flag itself as the decision to clear it should be based
+                * directly on an allocation request.
+                */
+               if (!current_is_kswapd())
+                       zone->compact_blockskip_flush = true;
+
                return COMPACT_COMPLETE;
+       }
 
        /*
         * order == -1 is expected when compacting via
@@ -801,6 +931,8 @@ unsigned long compaction_suitable(struct zone *zone, int order)
 static int compact_zone(struct zone *zone, struct compact_control *cc)
 {
        int ret;
+       unsigned long start_pfn = zone->zone_start_pfn;
+       unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
 
        ret = compaction_suitable(zone, cc->order);
        switch (ret) {
@@ -813,10 +945,29 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                ;
        }
 
-       /* Setup to move all movable pages to the end of the zone */
-       cc->migrate_pfn = zone->zone_start_pfn;
-       cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
-       cc->free_pfn &= ~(pageblock_nr_pages-1);
+       /*
+        * Setup to move all movable pages to the end of the zone. Use cached
+        * information on where the scanners should start, but check that it
+        * is initialised by ensuring the values are within zone boundaries.
+        */
+       cc->migrate_pfn = zone->compact_cached_migrate_pfn;
+       cc->free_pfn = zone->compact_cached_free_pfn;
+       if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
+               cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
+               zone->compact_cached_free_pfn = cc->free_pfn;
+       }
+       if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
+               cc->migrate_pfn = start_pfn;
+               zone->compact_cached_migrate_pfn = cc->migrate_pfn;
+       }
+
+       /*
+        * Clear pageblock skip if there were failures recently and compaction
+        * is about to be retried after being deferred. kswapd does not do
+        * this reset as it'll reset the cached information when going to sleep.
+        */
+       if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
+               __reset_isolation_suitable(zone);
 
        migrate_prep_local();