mm, compaction: don't recheck watermarks after COMPACT_SUCCESS
diff --git a/mm/compaction.c b/mm/compaction.c
index 9affb29..2e1113f 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1329,13 +1329,13 @@ static enum compact_result __compact_finished(struct zone *zone, struct compact_
 
                /* Job done if page is free of the right migratetype */
                if (!list_empty(&area->free_list[migratetype]))
-                       return COMPACT_PARTIAL;
+                       return COMPACT_SUCCESS;
 
 #ifdef CONFIG_CMA
                /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
                if (migratetype == MIGRATE_MOVABLE &&
                        !list_empty(&area->free_list[MIGRATE_CMA]))
-                       return COMPACT_PARTIAL;
+                       return COMPACT_SUCCESS;
 #endif
                /*
                 * Job done if allocation would steal freepages from
@@ -1343,7 +1343,7 @@ static enum compact_result __compact_finished(struct zone *zone, struct compact_
                 */
                if (find_suitable_fallback(area, order, migratetype,
                                                true, &can_steal) != -1)
-                       return COMPACT_PARTIAL;
+                       return COMPACT_SUCCESS;
        }
 
        return COMPACT_NO_SUITABLE_PAGE;
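The rename above is cosmetic but fixes a misleading name: every branch that used to return COMPACT_PARTIAL means __compact_finished() found a free page of the requested order (or a fallback it can steal from), i.e. compaction has fully succeeded for this request. A hypothetical helper illustrating the renamed contract; compaction_job_done() is a made-up name, the kernel's own predicates over enum compact_result live in include/linux/compaction.h:

	/* Illustrative only, not a kernel API */
	static inline bool compaction_job_done(enum compact_result result)
	{
		/* formerly COMPACT_PARTIAL, which wrongly implied unfinished work */
		return result == COMPACT_SUCCESS;
	}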
@@ -1367,7 +1367,7 @@ static enum compact_result compact_finished(struct zone *zone,
  * compaction_suitable: Is this suitable to run compaction on this zone now?
  * Returns
  *   COMPACT_SKIPPED  - If there are too few free pages for compaction
- *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
+ *   COMPACT_SUCCESS  - If the allocation would succeed without compaction
  *   COMPACT_CONTINUE - If compaction should run now
  */
 static enum compact_result __compaction_suitable(struct zone *zone, int order,
@@ -1388,7 +1388,7 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order,
         */
        if (zone_watermark_ok(zone, order, watermark, classzone_idx,
                                                                alloc_flags))
-               return COMPACT_PARTIAL;
+               return COMPACT_SUCCESS;
 
        /*
         * Watermarks for order-0 must be met for compaction. Note the 2UL.
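__compaction_suitable() answers three questions in order: would the allocation already succeed, are there enough free pages for compaction to operate at all, and only otherwise should compaction run. A rough model of that decision, assuming `watermark` starts from the zone's low watermark and noting (per the comment above) that the order-0 check is padded by 2UL << order pages for migration overhead; this is a sketch, not the literal function body:

	if (zone_watermark_ok(zone, order, watermark, classzone_idx, alloc_flags))
		return COMPACT_SUCCESS;		/* allocation would already succeed */
	if (!zone_watermark_ok(zone, 0, watermark + (2UL << order),
			       classzone_idx, alloc_flags))
		return COMPACT_SKIPPED;		/* too few free pages to migrate into */
	return COMPACT_CONTINUE;		/* worth running compaction now */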
@@ -1477,7 +1477,7 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
        ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
                                                        cc->classzone_idx);
        /* Compaction is likely to fail */
-       if (ret == COMPACT_PARTIAL || ret == COMPACT_SKIPPED)
+       if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED)
                return ret;
 
        /* huh, compaction_suitable is returning something unexpected */
@@ -1492,23 +1492,29 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
 
        /*
         * Setup to move all movable pages to the end of the zone. Used cached
-        * information on where the scanners should start but check that it
-        * is initialised by ensuring the values are within zone boundaries.
+        * information on where the scanners should start (unless we explicitly
+        * want to compact the whole zone), but check that it is initialised
+        * by ensuring the values are within zone boundaries.
         */
-       cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
-       cc->free_pfn = zone->compact_cached_free_pfn;
-       if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
-               cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
-               zone->compact_cached_free_pfn = cc->free_pfn;
-       }
-       if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
+       if (cc->whole_zone) {
                cc->migrate_pfn = start_pfn;
-               zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
-               zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
-       }
+               cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
+       } else {
+               cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
+               cc->free_pfn = zone->compact_cached_free_pfn;
+               if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
+                       cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
+                       zone->compact_cached_free_pfn = cc->free_pfn;
+               }
+               if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
+                       cc->migrate_pfn = start_pfn;
+                       zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
+                       zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
+               }
 
-       if (cc->migrate_pfn == start_pfn)
-               cc->whole_zone = true;
+               if (cc->migrate_pfn == start_pfn)
+                       cc->whole_zone = true;
+       }
 
        cc->last_migrated_pfn = 0;
 
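The restructured setup gives cc->whole_zone a precise meaning: when set, both scanners start at the zone edges and the cached positions are neither consulted nor updated; when clear, the cached positions are used but clamped back to the zone edges if stale. A self-contained model of the same logic; init_scanners() and the cached_* parameters are inventions of this sketch, standing in for zone->compact_cached_migrate_pfn[] and zone->compact_cached_free_pfn:

	static void init_scanners(unsigned long start_pfn, unsigned long end_pfn,
				  unsigned long cached_migrate,
				  unsigned long cached_free, bool whole_zone,
				  unsigned long *migrate_pfn,
				  unsigned long *free_pfn)
	{
		if (whole_zone) {
			/* scan everything, ignore cached positions */
			*migrate_pfn = start_pfn;
			*free_pfn = pageblock_start_pfn(end_pfn - 1);
			return;
		}
		/* use cached positions, falling back to zone edges if stale */
		*migrate_pfn = (cached_migrate >= start_pfn &&
				cached_migrate < end_pfn) ? cached_migrate : start_pfn;
		*free_pfn = (cached_free >= start_pfn && cached_free < end_pfn) ?
				cached_free : pageblock_start_pfn(end_pfn - 1);
	}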
@@ -1692,9 +1698,8 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
                                        alloc_flags, ac_classzone_idx(ac));
                rc = max(status, rc);
 
-               /* If a normal allocation would succeed, stop compacting */
-               if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
-                                       ac_classzone_idx(ac), alloc_flags)) {
+               /* The allocation should succeed, stop compacting */
+               if (status == COMPACT_SUCCESS) {
                        /*
                         * We think the allocation will succeed in this zone,
                         * but it is not certain, hence the false. The caller
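This hunk is the heart of the patch: compact_zone() already derives COMPACT_SUCCESS from the same zone_watermark_ok() logic in __compaction_suitable() and __compact_finished(), so rechecking the watermark here was redundant and, because it unconditionally used low_wmark_pages(), could even disagree with the watermark compaction itself had just applied. Conceptually the zone loop becomes the following (a sketch with the iterator and call arguments abbreviated):

	for_each_zone_zonelist_nodemask(...) {		/* abbreviated */
		status = compact_zone_order(...);	/* abbreviated args */
		rc = max(status, rc);
		if (status == COMPACT_SUCCESS)		/* no second watermark check */
			break;		/* allocation should succeed in this zone */
	}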
@@ -1730,10 +1735,18 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
 
 
 /* Compact all zones within a node */
-static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
+static void compact_node(int nid)
 {
+       pg_data_t *pgdat = NODE_DATA(nid);
        int zoneid;
        struct zone *zone;
+       struct compact_control cc = {
+               .order = -1,
+               .mode = MIGRATE_SYNC,
+               .ignore_skip_hint = true,
+               .whole_zone = true,
+       };
+
 
        for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
 
@@ -1741,60 +1754,19 @@ static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
                if (!populated_zone(zone))
                        continue;
 
-               cc->nr_freepages = 0;
-               cc->nr_migratepages = 0;
-               cc->zone = zone;
-               INIT_LIST_HEAD(&cc->freepages);
-               INIT_LIST_HEAD(&cc->migratepages);
-
-               /*
-                * When called via /proc/sys/vm/compact_memory
-                * this makes sure we compact the whole zone regardless of
-                * cached scanner positions.
-                */
-               if (is_via_compact_memory(cc->order))
-                       __reset_isolation_suitable(zone);
-
-               if (is_via_compact_memory(cc->order) ||
-                               !compaction_deferred(zone, cc->order))
-                       compact_zone(zone, cc);
-
-               VM_BUG_ON(!list_empty(&cc->freepages));
-               VM_BUG_ON(!list_empty(&cc->migratepages));
+               cc.nr_freepages = 0;
+               cc.nr_migratepages = 0;
+               cc.zone = zone;
+               INIT_LIST_HEAD(&cc.freepages);
+               INIT_LIST_HEAD(&cc.migratepages);
 
-               if (is_via_compact_memory(cc->order))
-                       continue;
+               compact_zone(zone, &cc);
 
-               if (zone_watermark_ok(zone, cc->order,
-                               low_wmark_pages(zone), 0, 0))
-                       compaction_defer_reset(zone, cc->order, false);
+               VM_BUG_ON(!list_empty(&cc.freepages));
+               VM_BUG_ON(!list_empty(&cc.migratepages));
        }
 }
 
-void compact_pgdat(pg_data_t *pgdat, int order)
-{
-       struct compact_control cc = {
-               .order = order,
-               .mode = MIGRATE_ASYNC,
-       };
-
-       if (!order)
-               return;
-
-       __compact_pgdat(pgdat, &cc);
-}
-
-static void compact_node(int nid)
-{
-       struct compact_control cc = {
-               .order = -1,
-               .mode = MIGRATE_SYNC,
-               .ignore_skip_hint = true,
-       };
-
-       __compact_pgdat(NODE_DATA(nid), &cc);
-}
-
 /* Compact all nodes in the system */
 static void compact_nodes(void)
 {
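With kswapd's compact_pgdat() gone and the old compact_node() wrapper folded in, the /proc/sys/vm/compact_memory and sysfs paths build their compact_control in one place, and order == -1 plus whole_zone = true make the removed is_via_compact_memory() special-casing unnecessary: scanning the whole zone is now expressed directly rather than by resetting cached scanner state. The surviving caller chain, roughly (a sketch; details of the real compact_nodes(), such as flushing pending per-CPU LRU updates first, are omitted):

	/* Rough shape of the sysctl path after this patch */
	static void compact_nodes(void)
	{
		int nid;

		for_each_online_node(nid)
			compact_node(nid);	/* sets up its own compact_control */
	}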
@@ -1900,8 +1872,6 @@ static void kcompactd_do_work(pg_data_t *pgdat)
                .ignore_skip_hint = true,
 
        };
-       bool success = false;
-
        trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
                                                        cc.classzone_idx);
        count_vm_event(KCOMPACTD_WAKE);
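The dropped `success` flag existed only to feed the watermark recheck; the next hunk replaces that with the same COMPACT_SUCCESS test used in try_to_compact_pages(). Simplified, kcompactd's per-zone decision becomes the following (a sketch; tracing and the deferral/suitability pre-checks are omitted):

	status = compact_zone(zone, &cc);
	if (status == COMPACT_SUCCESS)
		/* compaction produced the page; stop deferring this order */
		compaction_defer_reset(zone, cc.order, false);
	else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE)
		/* a full scan failed to help; back off for a while */
		defer_compaction(zone, cc.order);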
@@ -1930,9 +1900,7 @@ static void kcompactd_do_work(pg_data_t *pgdat)
                        return;
                status = compact_zone(zone, &cc);
 
-               if (zone_watermark_ok(zone, cc.order, low_wmark_pages(zone),
-                                               cc.classzone_idx, 0)) {
-                       success = true;
+               if (status == COMPACT_SUCCESS) {
                        compaction_defer_reset(zone, cc.order, false);
                } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
                        /*