diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a7198c0..7633c50 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -48,6 +48,7 @@
 #include <linux/backing-dev.h>
 #include <linux/fault-inject.h>
 #include <linux/page-isolation.h>
+#include <linux/page_ext.h>
 #include <linux/debugobjects.h>
 #include <linux/kmemleak.h>
 #include <linux/compaction.h>
 #include <linux/prefetch.h>
 #include <linux/mm_inline.h>
 #include <linux/migrate.h>
-#include <linux/page-debug-flags.h>
+#include <linux/page_ext.h>
 #include <linux/hugetlb.h>
 #include <linux/sched/rt.h>
+#include <linux/page_owner.h>
 
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
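
Two debugging series meet in these include changes: <linux/page-debug-flags.h> goes away because the per-page debug bits it carried now live in the page extension array, while <linux/page_owner.h> declares the allocation-tracking hooks used further down. Note that the diff adds #include <linux/page_ext.h> in two places, leaving a harmless duplicate include in the resulting file. The point of page_ext is to keep struct page at its current size: debugging metadata sits in a parallel per-frame array and is found by page frame number. A compilable userspace sketch of the idea, with every toy_* name invented for illustration:

#include <stdlib.h>

/* One metadata slot per page frame, allocated out of band instead
 * of enlarging struct page itself.  The kernel's lookup_page_ext()
 * plays this role against per-node (or per-section) storage. */
struct toy_page_ext {
	unsigned long flags;
};

static struct toy_page_ext *toy_ext_base;	/* covers pfn 0 .. nr_pages - 1 */

static int toy_page_ext_init(unsigned long nr_pages)
{
	toy_ext_base = calloc(nr_pages, sizeof(*toy_ext_base));
	return toy_ext_base ? 0 : -1;
}

static struct toy_page_ext *toy_lookup_page_ext(unsigned long pfn)
{
	return &toy_ext_base[pfn];
}
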
@@ -109,6 +111,7 @@ static DEFINE_SPINLOCK(managed_page_count_lock);
 
 unsigned long totalram_pages __read_mostly;
 unsigned long totalreserve_pages __read_mostly;
+unsigned long totalcma_pages __read_mostly;
 /*
  * When calculating the number of globally allowed dirty pages, there
  * is a certain number of per-zone reserves that should not be
@@ -424,6 +427,42 @@ static inline void prep_zero_page(struct page *page, unsigned int order,
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 unsigned int _debug_guardpage_minorder;
+bool _debug_pagealloc_enabled __read_mostly;
+bool _debug_guardpage_enabled __read_mostly;
+
+static int __init early_debug_pagealloc(char *buf)
+{
+       if (!buf)
+               return -EINVAL;
+
+       if (strcmp(buf, "on") == 0)
+               _debug_pagealloc_enabled = true;
+
+       return 0;
+}
+early_param("debug_pagealloc", early_debug_pagealloc);
+
+static bool need_debug_guardpage(void)
+{
+       /* If we don't use debug_pagealloc, we don't need guard page */
+       if (!debug_pagealloc_enabled())
+               return false;
+
+       return true;
+}
+
+static void init_debug_guardpage(void)
+{
+       if (!debug_pagealloc_enabled())
+               return;
+
+       _debug_guardpage_enabled = true;
+}
+
+struct page_ext_operations debug_guardpage_ops = {
+       .need = need_debug_guardpage,
+       .init = init_debug_guardpage,
+};
 
 static int __init debug_guardpage_minorder_setup(char *buf)
 {
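
With this hunk, debug_pagealloc becomes a boot-time decision instead of a purely compile-time one: booting with "debug_pagealloc=on" sets _debug_pagealloc_enabled, and guard pages are committed only in init_debug_guardpage(), once the page_ext storage exists (it is set up per node from free_area_init_core() via pgdat_page_ext_init(), visible in a later hunk). The need/init pair in debug_guardpage_ops is the registration contract with the page_ext core: ->need is asked at boot whether this client wants extension space at all, and ->init runs after that space is ready. A compilable toy model of the two-phase pattern (all toy_* names are invented):

#include <stdbool.h>
#include <stdio.h>

struct toy_ext_ops {
	bool (*need)(void);	/* should space be allocated for this client? */
	void (*init)(void);	/* called once the storage exists */
};

static bool toy_guard_enabled;

static bool toy_need_guard(void) { return true; /* e.g. debug_pagealloc=on */ }
static void toy_init_guard(void) { toy_guard_enabled = true; }

static struct toy_ext_ops toy_clients[] = {
	{ .need = toy_need_guard, .init = toy_init_guard },
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(toy_clients) / sizeof(toy_clients[0]); i++)
		if (toy_clients[i].need && toy_clients[i].need())
			toy_clients[i].init();
	printf("guard pages %s\n", toy_guard_enabled ? "enabled" : "disabled");
	return 0;
}
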
@@ -439,18 +478,44 @@ static int __init debug_guardpage_minorder_setup(char *buf)
 }
 __setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
 
-static inline void set_page_guard_flag(struct page *page)
+static inline void set_page_guard(struct zone *zone, struct page *page,
+                               unsigned int order, int migratetype)
 {
-       __set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+       struct page_ext *page_ext;
+
+       if (!debug_guardpage_enabled())
+               return;
+
+       page_ext = lookup_page_ext(page);
+       __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
+
+       INIT_LIST_HEAD(&page->lru);
+       set_page_private(page, order);
+       /* Guard pages are not available for any usage */
+       __mod_zone_freepage_state(zone, -(1 << order), migratetype);
 }
 
-static inline void clear_page_guard_flag(struct page *page)
+static inline void clear_page_guard(struct zone *zone, struct page *page,
+                               unsigned int order, int migratetype)
 {
-       __clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+       struct page_ext *page_ext;
+
+       if (!debug_guardpage_enabled())
+               return;
+
+       page_ext = lookup_page_ext(page);
+       __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
+
+       set_page_private(page, 0);
+       if (!is_migrate_isolate(migratetype))
+               __mod_zone_freepage_state(zone, (1 << order), migratetype);
 }
 #else
-static inline void set_page_guard_flag(struct page *page) { }
-static inline void clear_page_guard_flag(struct page *page) { }
+struct page_ext_operations debug_guardpage_ops = { NULL, };
+static inline void set_page_guard(struct zone *zone, struct page *page,
+                               unsigned int order, int migratetype) {}
+static inline void clear_page_guard(struct zone *zone, struct page *page,
+                               unsigned int order, int migratetype) {}
 #endif
 
 static inline void set_page_order(struct page *page, unsigned int order)
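
set_page_guard()/clear_page_guard() replace the old one-bit flag helpers and absorb the bookkeeping that was previously open-coded at each call site: the guard bit moves from page->debug_flags into the page_ext flags word, and the zone's free-page accounting is adjusted in the same place, since a guarded block sits in the buddy structure but must not satisfy allocations or inflate watermark checks. Continuing the toy sketch from above (same invented helpers):

#define TOY_GUARD_BIT	0

struct toy_zone {
	long nr_free_pages;
};

static void toy_set_guard(struct toy_zone *z, unsigned long pfn, unsigned int order)
{
	toy_lookup_page_ext(pfn)->flags |= 1UL << TOY_GUARD_BIT;
	z->nr_free_pages -= 1L << order;	/* guarded: not allocatable */
}

static void toy_clear_guard(struct toy_zone *z, unsigned long pfn, unsigned int order)
{
	toy_lookup_page_ext(pfn)->flags &= ~(1UL << TOY_GUARD_BIT);
	z->nr_free_pages += 1L << order;	/* block returns to service */
}
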
@@ -581,12 +646,7 @@ static inline void __free_one_page(struct page *page,
                 * merge with it and move up one order.
                 */
                if (page_is_guard(buddy)) {
-                       clear_page_guard_flag(buddy);
-                       set_page_private(buddy, 0);
-                       if (!is_migrate_isolate(migratetype)) {
-                               __mod_zone_freepage_state(zone, 1 << order,
-                                                         migratetype);
-                       }
+                       clear_page_guard(zone, buddy, order, migratetype);
                } else {
                        list_del(&buddy->lru);
                        zone->free_area[order].nr_free--;
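
The payoff in __free_one_page(): when the buddy of a freed block turns out to be a guard block, a single clear_page_guard() call now restores it, replacing three open-coded steps. For context, the buddy relation itself is pure index arithmetic; two blocks of order n are buddies exactly when their page indexes differ only in bit n:

/* Buddy of the block starting at page_idx, both of size 2^order.
 * E.g. at order 3, pages 0 and 8 are buddies; at order 0, 4 and 5. */
static unsigned long toy_buddy_index(unsigned long page_idx, unsigned int order)
{
	return page_idx ^ (1UL << order);
}
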
@@ -755,6 +815,8 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
        if (bad)
                return false;
 
+       reset_page_owner(page, order);
+
        if (!PageHighMem(page)) {
                debug_check_no_locks_freed(page_address(page),
                                           PAGE_SIZE << order);
@@ -861,23 +923,18 @@ static inline void expand(struct zone *zone, struct page *page,
                size >>= 1;
                VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
-               if (high < debug_guardpage_minorder()) {
+               if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
+                       debug_guardpage_enabled() &&
+                       high < debug_guardpage_minorder()) {
                        /*
                         * Mark as guard pages (or page), that will allow to
                         * merge back to allocator when buddy will be freed.
                         * Corresponding page table entries will not be touched,
                         * pages will stay not present in virtual address space
                         */
-                       INIT_LIST_HEAD(&page[size].lru);
-                       set_page_guard_flag(&page[size]);
-                       set_page_private(&page[size], high);
-                       /* Guard pages are not available for any usage */
-                       __mod_zone_freepage_state(zone, -(1 << high),
-                                                 migratetype);
+                       set_page_guard(zone, &page[size], high, migratetype);
                        continue;
                }
-#endif
                list_add(&page[size].lru, &area->free_list[migratetype]);
                area->nr_free++;
                set_page_order(&page[size], high);
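
In expand(), the #ifdef around the guard branch is replaced by IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) plus the runtime debug_guardpage_enabled() test, so the branch is always compiled (and type-checked) and becomes dead code when the option is off; the open-coded guard marking collapses into set_page_guard(). The split itself peels halves off a 2^high block until only 2^low pages remain for the caller. A standalone toy model of that loop:

#include <stdbool.h>
#include <stdio.h>

/* Keep 2^low pages for the caller; each peeled-off half, of order
 * high-1 down to low, becomes either a free block or (under
 * debug_pagealloc) a guard block. */
static void toy_expand(unsigned long base, unsigned int low, unsigned int high,
		       bool guard)
{
	unsigned long size = 1UL << high;

	while (high > low) {
		high--;
		size >>= 1;
		printf("%s block at page %lu, order %u\n",
		       guard ? "guard" : "free ", base + size, high);
	}
	printf("caller keeps pages %lu..%lu\n", base, base + (1UL << low) - 1);
}

int main(void)
{
	toy_expand(0, 0, 3, false);	/* split an order-3 block down to order 0 */
	return 0;
}
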
@@ -935,6 +992,8 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
        if (order && (gfp_flags & __GFP_COMP))
                prep_compound_page(page, order);
 
+       set_page_owner(page, order, gfp_flags);
+
        return 0;
 }
 
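set_page_owner() here and reset_page_owner() in the free_pages_prepare() hunk above are the two ends of page_owner tracking: every page leaving the allocator records its order and gfp mask (the real implementation also saves a stack trace of the allocation site, readable later through debugfs), and every free clears the record. A toy record keyed by pfn, with sizes and names invented for the sketch:

#include <stdbool.h>

#define NR_TOY_PAGES	1024

struct toy_owner {
	unsigned int order;
	unsigned int gfp_mask;
	bool tracked;
};

static struct toy_owner toy_owners[NR_TOY_PAGES];

static void toy_set_owner(unsigned long pfn, unsigned int order,
			  unsigned int gfp_mask)
{
	toy_owners[pfn] = (struct toy_owner){
		.order = order, .gfp_mask = gfp_mask, .tracked = true,
	};
}

static void toy_reset_owner(unsigned long pfn, unsigned int order)
{
	for (unsigned long i = 0; i < (1UL << order); i++)
		toy_owners[pfn + i].tracked = false;
}
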
@@ -1507,8 +1566,11 @@ void split_page(struct page *page, unsigned int order)
                split_page(virt_to_page(page[0].shadow), order);
 #endif
 
-       for (i = 1; i < (1 << order); i++)
+       set_page_owner(page, 0, 0);
+       for (i = 1; i < (1 << order); i++) {
                set_page_refcounted(page + i);
+               set_page_owner(page + i, 0, 0);
+       }
 }
 EXPORT_SYMBOL_GPL(split_page);
 
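split_page() turns one order-n allocation into 2^n independent order-0 pages, so besides giving each tail page its own reference count, every piece is re-tagged as a fresh order-0 allocation; without this, page_owner would later see the tail pages as untracked. In terms of the toy helpers above:

/* Re-tag each piece of a split block as its own order-0 allocation
 * (gfp mask 0, mirroring the set_page_owner(page, 0, 0) calls). */
static void toy_split_owner(unsigned long pfn, unsigned int order)
{
	for (unsigned long i = 0; i < (1UL << order); i++)
		toy_set_owner(pfn + i, 0, 0);
}
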
@@ -1548,6 +1610,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
                }
        }
 
+       set_page_owner(page, order, 0);
        return 1UL << order;
 }
 
@@ -1990,7 +2053,7 @@ zonelist_scan:
 
        /*
         * Scan zonelist, looking for a zone with enough free.
-        * See also __cpuset_node_allowed_softwall() comment in kernel/cpuset.c.
+        * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
         */
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                                high_zoneidx, nodemask) {
@@ -2001,7 +2064,7 @@ zonelist_scan:
                                continue;
                if (cpusets_enabled() &&
                        (alloc_flags & ALLOC_CPUSET) &&
-                       !cpuset_zone_allowed_softwall(zone, gfp_mask))
+                       !cpuset_zone_allowed(zone, gfp_mask))
                                continue;
                /*
                 * Distribute pages in proportion to the individual
@@ -2529,7 +2592,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
                        alloc_flags |= ALLOC_HARDER;
                /*
                 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
-                * comment for __cpuset_node_allowed_softwall().
+                * comment for __cpuset_node_allowed().
                 */
                alloc_flags &= ~ALLOC_CPUSET;
        } else if (unlikely(rt_task(current)) && !in_interrupt())
@@ -4856,6 +4919,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 #endif
        init_waitqueue_head(&pgdat->kswapd_wait);
        init_waitqueue_head(&pgdat->pfmemalloc_wait);
+       pgdat_page_ext_init(pgdat);
 
        for (j = 0; j < MAX_NR_ZONES; j++) {
                struct zone *zone = pgdat->node_zones + j;
@@ -4874,16 +4938,18 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                 * and per-cpu initialisations
                 */
                memmap_pages = calc_memmap_size(size, realsize);
-               if (freesize >= memmap_pages) {
-                       freesize -= memmap_pages;
-                       if (memmap_pages)
-                               printk(KERN_DEBUG
-                                      "  %s zone: %lu pages used for memmap\n",
-                                      zone_names[j], memmap_pages);
-               } else
-                       printk(KERN_WARNING
-                               "  %s zone: %lu pages exceeds freesize %lu\n",
-                               zone_names[j], memmap_pages, freesize);
+               if (!is_highmem_idx(j)) {
+                       if (freesize >= memmap_pages) {
+                               freesize -= memmap_pages;
+                               if (memmap_pages)
+                                       printk(KERN_DEBUG
+                                              "  %s zone: %lu pages used for memmap\n",
+                                              zone_names[j], memmap_pages);
+                       } else
+                               printk(KERN_WARNING
+                                       "  %s zone: %lu pages exceeds freesize %lu\n",
+                                       zone_names[j], memmap_pages, freesize);
+               }
 
                /* Account for reserved pages */
                if (j == 0 && freesize > dma_reserve) {
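
The free_area_init_core() change is a fix, not a feature: a highmem zone's memmap is allocated from lowmem, so subtracting memmap_pages from the highmem zone's own freesize charged the cost to the wrong zone; the subtraction is now done only for non-highmem zones. The amounts are worth the care, on the order of sizeof(struct page) per page frame. A rough, self-contained model of the calc_memmap_size() arithmetic (the 64-byte struct page size is an assumption for the example):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

static unsigned long toy_memmap_pages(unsigned long spanned_pages)
{
	unsigned long struct_page_size = 64;	/* assumed sizeof(struct page) */

	return (spanned_pages * struct_page_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
	/* a 1 GiB zone: 262144 frames -> 4096 pages (16 MiB) of memmap */
	printf("%lu\n", toy_memmap_pages(1UL << 18));
	return 0;
}
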
@@ -5521,7 +5587,7 @@ void __init mem_init_print_info(const char *str)
 
        pr_info("Memory: %luK/%luK available "
               "(%luK kernel code, %luK rwdata, %luK rodata, "
-              "%luK init, %luK bss, %luK reserved"
+              "%luK init, %luK bss, %luK reserved, %luK cma-reserved"
 #ifdef CONFIG_HIGHMEM
               ", %luK highmem"
 #endif
@@ -5529,7 +5595,8 @@ void __init mem_init_print_info(const char *str)
               nr_free_pages() << (PAGE_SHIFT-10), physpages << (PAGE_SHIFT-10),
               codesize >> 10, datasize >> 10, rosize >> 10,
               (init_data_size + init_code_size) >> 10, bss_size >> 10,
-              (physpages - totalram_pages) << (PAGE_SHIFT-10),
+              (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT-10),
+              totalcma_pages << (PAGE_SHIFT-10),
 #ifdef CONFIG_HIGHMEM
               totalhigh_pages << (PAGE_SHIFT-10),
 #endif
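
With totalcma_pages maintained (each CMA area declared elsewhere in the tree adds its page count to it), the boot summary can split CMA out of the catch-all "reserved" figure: at the time this line prints, CMA areas are still reserved memory, so they used to be silently included in physpages - totalram_pages. A self-contained worked example with hypothetical numbers:

#include <stdio.h>

#define PAGE_SHIFT	12

int main(void)
{
	unsigned long physpages      = 1048576;	/* 4 GiB of RAM */
	unsigned long totalram_pages = 1007616;	/* pages given to the allocator */
	unsigned long totalcma_pages = 16384;	/* a 64 MiB CMA reservation */

	printf("reserved:     %luK\n",
	       (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT - 10));
	printf("cma-reserved: %luK\n", totalcma_pages << (PAGE_SHIFT - 10));
	return 0;
}
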
@@ -6221,9 +6288,9 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
                if (!PageLRU(page))
                        found++;
                /*
-                * If there are RECLAIMABLE pages, we need to check it.
-                * But now, memory offline itself doesn't call shrink_slab()
-                * and it still to be fixed.
+                * If there are RECLAIMABLE pages, we need to check
+                * it.  But now, memory offline itself doesn't call
+                * shrink_node_slabs() and it still to be fixed.
                 */
                /*
                 * If the page is not RAM, page_count()should be 0.