mm/hugetlb: check for reserved hugepages during memory offline
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index c4f5682..064b7fb 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -7,6 +7,7 @@
 #include <linux/pageblock-flags.h>
 #include <linux/memory.h>
 #include <linux/hugetlb.h>
+#include <linux/page_owner.h>
 #include "internal.h"
 
 #define CREATE_TRACE_POINTS
@@ -80,7 +81,7 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 {
        struct zone *zone;
        unsigned long flags, nr_pages;
-       struct page *isolated_page = NULL;
+       bool isolated_page = false;
        unsigned int order;
        unsigned long page_idx, buddy_idx;
        struct page *buddy;
@@ -108,9 +109,7 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
                        if (pfn_valid_within(page_to_pfn(buddy)) &&
                            !is_migrate_isolate_page(buddy)) {
                                __isolate_free_page(page, order);
-                               kernel_map_pages(page, (1 << order), 1);
-                               set_page_refcounted(page);
-                               isolated_page = page;
+                               isolated_page = true;
                        }
                }
        }
@@ -128,8 +127,10 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
        zone->nr_isolate_pageblock--;
 out:
        spin_unlock_irqrestore(&zone->lock, flags);
-       if (isolated_page)
-               __free_pages(isolated_page, order);
+       if (isolated_page) {
+               post_alloc_hook(page, order, __GFP_MOVABLE);
+               __free_pages(page, order);
+       }
 }
 
 static inline struct page *
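The hunks above replace the open-coded "page leaves the buddy allocator" steps (kernel_map_pages() and set_page_refcounted()) with a single post_alloc_hook() call made after the zone lock is dropped. Since the unlock path only needs to know whether a page was pulled off the isolated free list, the struct page pointer tracking variable becomes a plain bool; the page itself is always just "page". For context, the helper that centralizes this processing in mm/page_alloc.c looked roughly like the sketch below in this era of the code (a paraphrase from memory, not a verbatim copy):

	void post_alloc_hook(struct page *page, unsigned int order,
			     gfp_t gfp_flags)
	{
		/* Reset allocator-private state and give the caller a reference. */
		set_page_private(page, 0);
		set_page_refcounted(page);

		/* Debugging/instrumentation hooks that fire on every allocation. */
		arch_alloc_page(page, order);
		kernel_map_pages(page, 1 << order, 1);
		kernel_poison_pages(page, 1 << order, 1);
		kasan_alloc_pages(page, order);
		set_page_owner(page, order, gfp_flags);
	}

Funneling the isolation path through the same hook as the regular allocation path is what pulls in the new linux/page_owner.h include at the top of the file, and it keeps page_owner, KASAN, and poisoning state consistent no matter how a page leaves the free lists.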
@@ -246,6 +247,7 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
        return pfn;
 }
 
+/* Caller should ensure that requested range is in a single zone */
 int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
                        bool skip_hwpoisoned_pages)
 {
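The new comment documents a real constraint rather than a stylistic preference: test_pages_isolated() derives one zone from the first valid page in the range and holds that single zone->lock while scanning, so a range straddling a zone boundary would be checked under the wrong lock. A condensed sketch of the locking pattern in this version of the function (assumed from the surrounding code, abbreviated):

	zone = page_zone(page);		/* zone of the first valid page */
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						skip_hwpoisoned_pages);
	spin_unlock_irqrestore(&zone->lock, flags);
	return pfn < end_pfn ? -EBUSY : 0;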
@@ -288,13 +290,10 @@ struct page *alloc_migrate_target(struct page *page, unsigned long private,
         * accordance with memory policy of the user process if possible. For
         * now as a simple work-around, we use the next node for destination.
         */
-       if (PageHuge(page)) {
-               int node = next_online_node(page_to_nid(page));
-               if (node == MAX_NUMNODES)
-                       node = first_online_node;
+       if (PageHuge(page))
                return alloc_huge_page_node(page_hstate(compound_head(page)),
-                                           node);
-       }
+                                           next_node_in(page_to_nid(page),
+                                                        node_online_map));
 
        if (PageHighMem(page))
                gfp_mask |= __GFP_HIGHMEM;
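
The alloc_migrate_target() hunk swaps the open-coded "advance to the next online node, wrapping to the first" sequence for next_node_in(), a helper from include/linux/nodemask.h with exactly those semantics. Its out-of-line form in lib/nodemask.c is roughly the following (a sketch, hedged to this kernel era):

	/* Return the next node set in *srcp after @node, wrapping around. */
	int __next_node_in(int node, const nodemask_t *srcp)
	{
		int ret = __next_node(node, srcp);

		if (ret == MAX_NUMNODES)
			ret = __first_node(srcp);
		return ret;
	}

With node_online_map as the mask, next_node_in(page_to_nid(page), node_online_map) behaves the same as the four removed lines, minus the local variable, so the hugepage migration target is still allocated on the next online node after the source page's node.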