diff --git a/mm/compaction.c b/mm/compaction.c
index 585de54..93f71d9 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -71,49 +71,6 @@ static inline bool migrate_async_suitable(int migratetype)
        return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
 }
 
-/*
- * Check that the whole (or subset of) a pageblock given by the interval of
- * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
- * with the migration or free compaction scanner. The scanners then need to
- * use only pfn_valid_within() check for arches that allow holes within
- * pageblocks.
- *
- * Return struct page pointer of start_pfn, or NULL if checks were not passed.
- *
- * It's possible on some configurations to have a setup like node0 node1 node0
- * i.e. it's possible that all pages within a zone's range of pages do not
- * belong to a single zone. We assume that a border between node0 and node1
- * can occur within a single pageblock, but not a node0 node1 node0
- * interleaving within a single pageblock. It is therefore sufficient to check
- * the first and last page of a pageblock and avoid checking each individual
- * page in a pageblock.
- */
-static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
-                               unsigned long end_pfn, struct zone *zone)
-{
-       struct page *start_page;
-       struct page *end_page;
-
-       /* end_pfn is one past the range we are checking */
-       end_pfn--;
-
-       if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
-               return NULL;
-
-       start_page = pfn_to_page(start_pfn);
-
-       if (page_zone(start_page) != zone)
-               return NULL;
-
-       end_page = pfn_to_page(end_pfn);
-
-       /* This gives shorter code than deriving page_zone(end_page) */
-       if (page_zone_id(start_page) != page_zone_id(end_page))
-               return NULL;
-
-       return start_page;
-}
-
 #ifdef CONFIG_COMPACTION
 
 /* Do not skip compaction more than 64 times */
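
The pageblock_pfn_to_page() helper is deleted from compaction.c here; judging by mainline history it was hoisted into shared page-allocator code rather than dropped, and the callers below are reworked to hand it a pageblock-aligned [block_start_pfn, block_end_pfn) range, which is what its first/last-page zone check assumes. Below is a minimal standalone sketch (not kernel code) of the two rounding idioms those callers use; the value of pageblock_nr_pages is a hypothetical stand-in, assumed, as in the kernel, to be a power of two.

    #include <assert.h>
    #include <stdio.h>

    #define pageblock_nr_pages 512UL   /* hypothetical: order-9 blocks, 4K pages */

    /* Round pfn down to the start of its pageblock: pfn & ~(nr - 1). */
    static unsigned long block_start(unsigned long pfn)
    {
            return pfn & ~(pageblock_nr_pages - 1);
    }

    /* Round up to the next pageblock boundary: ALIGN(pfn + 1, nr). */
    static unsigned long block_end(unsigned long pfn)
    {
            return (pfn + pageblock_nr_pages) & ~(pageblock_nr_pages - 1);
    }

    int main(void)
    {
            assert(block_start(1000) == 512 && block_end(1000) == 1024);
            /* A boundary pfn starts its own block and ends at the next one. */
            assert(block_start(512) == 512 && block_end(512) == 1024);
            printf("pfn 1000 lies in pageblock [%lu, %lu)\n",
                   block_start(1000), block_end(1000));
            return 0;
    }

Together the two forms bracket the pageblock containing pfn, which is exactly the [block_start_pfn, block_end_pfn) pair threaded through the hunks below.
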
@@ -200,7 +157,8 @@ static void reset_cached_positions(struct zone *zone)
 {
        zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
        zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
-       zone->compact_cached_free_pfn = zone_end_pfn(zone);
+       zone->compact_cached_free_pfn =
+                       round_down(zone_end_pfn(zone) - 1, pageblock_nr_pages);
 }
 
 /*
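
reset_cached_positions() used to park the free scanner's cached position at zone_end_pfn(), which is one past the last pfn of the zone and thus never a valid scanning position; with the matching check in compact_zone() tightened to >= further down, that value would now be rejected on every pass. The new reset, round_down(zone_end_pfn(zone) - 1, pageblock_nr_pages), is the start of the last pageblock the zone actually contains. A worked example with hypothetical numbers (a zone whose end is not pageblock-aligned):

    #include <stdio.h>

    #define pageblock_nr_pages 512UL

    /* round_down(x, y) for power-of-two y, as the kernel macro behaves. */
    static unsigned long round_down_pow2(unsigned long x, unsigned long y)
    {
            return x & ~(y - 1);
    }

    int main(void)
    {
            unsigned long zone_end = 1048570;   /* exclusive end, unaligned */

            /* Old cached value: outside the zone, not block-aligned. */
            printf("old: %lu\n", zone_end);
            /* New cached value: 1048064, the last block start in the zone. */
            printf("new: %lu\n", round_down_pow2(zone_end - 1, pageblock_nr_pages));
            return 0;
    }
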
@@ -554,13 +512,17 @@ unsigned long
 isolate_freepages_range(struct compact_control *cc,
                        unsigned long start_pfn, unsigned long end_pfn)
 {
-       unsigned long isolated, pfn, block_end_pfn;
+       unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
        LIST_HEAD(freelist);
 
        pfn = start_pfn;
+       block_start_pfn = pfn & ~(pageblock_nr_pages - 1);
+       if (block_start_pfn < cc->zone->zone_start_pfn)
+               block_start_pfn = cc->zone->zone_start_pfn;
        block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
 
        for (; pfn < end_pfn; pfn += isolated,
+                               block_start_pfn = block_end_pfn,
                                block_end_pfn += pageblock_nr_pages) {
                /* Protect pfn from changing by isolate_freepages_block */
                unsigned long isolate_start_pfn = pfn;
@@ -573,11 +535,13 @@ isolate_freepages_range(struct compact_control *cc,
                 * scanning range to the right one.
                 */
                if (pfn >= block_end_pfn) {
+                       block_start_pfn = pfn & ~(pageblock_nr_pages - 1);
                        block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
                        block_end_pfn = min(block_end_pfn, end_pfn);
                }
 
-               if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
+               if (!pageblock_pfn_to_page(block_start_pfn,
+                                       block_end_pfn, cc->zone))
                        break;
 
                isolated = isolate_freepages_block(cc, &isolate_start_pfn,
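
In isolate_freepages_range(), the zone check used to receive pfn, which on entry (and after a mid-block restart) need not be pageblock-aligned; pageblock_pfn_to_page() is now given the full enclosing block via block_start_pfn. Since a zone may itself begin in the middle of a pageblock, the rounded-down start is clamped to zone_start_pfn. A standalone sketch of that clamp with hypothetical numbers:

    #include <assert.h>

    #define pageblock_nr_pages 512UL

    static unsigned long clamp_block_start(unsigned long pfn,
                                           unsigned long zone_start_pfn)
    {
            unsigned long block_start_pfn = pfn & ~(pageblock_nr_pages - 1);

            /* Never let the aligned start fall below the zone itself. */
            if (block_start_pfn < zone_start_pfn)
                    block_start_pfn = zone_start_pfn;
            return block_start_pfn;
    }

    int main(void)
    {
            /* A zone starting at pfn 100, i.e. inside pageblock [0, 512). */
            assert(clamp_block_start(300, 100) == 100);  /* clamped */
            assert(clamp_block_start(700, 100) == 512);  /* normal case */
            return 0;
    }
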
@@ -863,18 +827,23 @@ unsigned long
 isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
                                                        unsigned long end_pfn)
 {
-       unsigned long pfn, block_end_pfn;
+       unsigned long pfn, block_start_pfn, block_end_pfn;
 
        /* Scan block by block. First and last block may be incomplete */
        pfn = start_pfn;
+       block_start_pfn = pfn & ~(pageblock_nr_pages - 1);
+       if (block_start_pfn < cc->zone->zone_start_pfn)
+               block_start_pfn = cc->zone->zone_start_pfn;
        block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
 
        for (; pfn < end_pfn; pfn = block_end_pfn,
+                               block_start_pfn = block_end_pfn,
                                block_end_pfn += pageblock_nr_pages) {
 
                block_end_pfn = min(block_end_pfn, end_pfn);
 
-               if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
+               if (!pageblock_pfn_to_page(block_start_pfn,
+                                       block_end_pfn, cc->zone))
                        continue;
 
                pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
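
isolate_migratepages_range() gets the same block_start_pfn bookkeeping. Note the asymmetry the diff preserves: the free-page variant above breaks out as soon as a block fails the zone check (its caller, e.g. alloc_contig_range(), needs the whole range), while the migrate variant simply continues with the next block. The per-pageblock walk, as the migrate variant does it, looks roughly like this standalone sketch, with the zone check stubbed out and the names mirroring the kernel's:

    #include <stdio.h>

    #define pageblock_nr_pages 512UL

    /* Stand-in for pageblock_pfn_to_page() returning non-NULL. */
    static int pageblock_ok(unsigned long start, unsigned long end)
    {
            return start < end;
    }

    static void walk(unsigned long start_pfn, unsigned long end_pfn,
                     unsigned long zone_start_pfn)
    {
            unsigned long pfn = start_pfn;
            unsigned long block_start_pfn = pfn & ~(pageblock_nr_pages - 1);
            unsigned long block_end_pfn =
                    (pfn + pageblock_nr_pages) & ~(pageblock_nr_pages - 1);

            if (block_start_pfn < zone_start_pfn)
                    block_start_pfn = zone_start_pfn;

            for (; pfn < end_pfn; pfn = block_end_pfn,
                                  block_start_pfn = block_end_pfn,
                                  block_end_pfn += pageblock_nr_pages) {
                    unsigned long end = block_end_pfn < end_pfn ?
                                        block_end_pfn : end_pfn;

                    if (!pageblock_ok(block_start_pfn, end))
                            continue;
                    printf("scan [%lu, %lu)\n", pfn, end);
            }
    }

    int main(void)
    {
            walk(300, 1500, 100);   /* prints three per-block ranges */
            return 0;
    }
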
@@ -1103,7 +1072,9 @@ int sysctl_compact_unevictable_allowed __read_mostly = 1;
 static isolate_migrate_t isolate_migratepages(struct zone *zone,
                                        struct compact_control *cc)
 {
-       unsigned long low_pfn, end_pfn;
+       unsigned long block_start_pfn;
+       unsigned long block_end_pfn;
+       unsigned long low_pfn;
        unsigned long isolate_start_pfn;
        struct page *page;
        const isolate_mode_t isolate_mode =
@@ -1115,16 +1086,21 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
         * initialized by compact_zone()
         */
        low_pfn = cc->migrate_pfn;
+       block_start_pfn = cc->migrate_pfn & ~(pageblock_nr_pages - 1);
+       if (block_start_pfn < zone->zone_start_pfn)
+               block_start_pfn = zone->zone_start_pfn;
 
        /* Only scan within a pageblock boundary */
-       end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);
+       block_end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);
 
        /*
         * Iterate over whole pageblocks until we find the first suitable.
         * Do not cross the free scanner.
         */
-       for (; end_pfn <= cc->free_pfn;
-                       low_pfn = end_pfn, end_pfn += pageblock_nr_pages) {
+       for (; block_end_pfn <= cc->free_pfn;
+                       low_pfn = block_end_pfn,
+                       block_start_pfn = block_end_pfn,
+                       block_end_pfn += pageblock_nr_pages) {
 
                /*
                 * This can potentially iterate a massively long zone with
@@ -1135,7 +1111,8 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
                                                && compact_should_abort(cc))
                        break;
 
-               page = pageblock_pfn_to_page(low_pfn, end_pfn, zone);
+               page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
+                                                                       zone);
                if (!page)
                        continue;
 
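
In isolate_migratepages() the old end_pfn becomes block_end_pfn and a clamped block_start_pfn is introduced, so the zone check again covers a whole pageblock even though low_pfn, the resume position restored from cc->migrate_pfn, may sit mid-block; the isolation call in the next hunk still starts from low_pfn. The loop bound block_end_pfn <= cc->free_pfn keeps the migrate scanner from running past the free scanner, exactly as end_pfn <= cc->free_pfn did. A sketch of the three cursors with hypothetical values:

    #include <stdio.h>

    #define pageblock_nr_pages 512UL

    int main(void)
    {
            unsigned long migrate_pfn = 1300;   /* cached resume point */
            unsigned long zone_start_pfn = 100;

            unsigned long low_pfn = migrate_pfn;
            unsigned long block_start_pfn = low_pfn & ~(pageblock_nr_pages - 1);
            unsigned long block_end_pfn =
                    (low_pfn + pageblock_nr_pages) & ~(pageblock_nr_pages - 1);

            if (block_start_pfn < zone_start_pfn)
                    block_start_pfn = zone_start_pfn;

            /* Zone check covers [1024, 1536); isolation resumes at 1300. */
            printf("check block [%lu, %lu), isolate from %lu\n",
                   block_start_pfn, block_end_pfn, low_pfn);
            return 0;
    }
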
@@ -1154,8 +1131,8 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 
                /* Perform the isolation */
                isolate_start_pfn = low_pfn;
-               low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn,
-                                                               isolate_mode);
+               low_pfn = isolate_migratepages_block(cc, low_pfn,
+                                               block_end_pfn, isolate_mode);
 
                if (!low_pfn || cc->contended) {
                        acct_isolated(zone, cc);
@@ -1371,11 +1348,11 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
         */
        cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
        cc->free_pfn = zone->compact_cached_free_pfn;
-       if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
-               cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
+       if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
+               cc->free_pfn = round_down(end_pfn - 1, pageblock_nr_pages);
                zone->compact_cached_free_pfn = cc->free_pfn;
        }
-       if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
+       if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
                cc->migrate_pfn = start_pfn;
                zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
                zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
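
Finally, the sanity checks in compact_zone() now treat end_pfn as exclusive: a cached position equal to end_pfn lies outside the zone, so > becomes >=. The free-scanner fallback changes accordingly, from end_pfn & ~(pageblock_nr_pages - 1), which for a pageblock-aligned zone end is just end_pfn again, to round_down(end_pfn - 1, pageblock_nr_pages), matching reset_cached_positions() above. A standalone illustration of the off-by-one the old check let through, with hypothetical zone bounds:

    #include <assert.h>

    int main(void)
    {
            unsigned long start_pfn = 0, end_pfn = 1048576;  /* [start, end) */
            unsigned long cached = end_pfn;     /* stale: one past the zone */

            /* The old "cached > end_pfn" test accepted this value... */
            assert(!(cached < start_pfn || cached > end_pfn));
            /* ...the new exclusive-end test rejects it. */
            assert(cached < start_pfn || cached >= end_pfn);
            return 0;
    }
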