[cascardo/linux.git] / mm / hugetlb.c
index ef6963b..06ae13e 100644
@@ -4,7 +4,6 @@
  */
 #include <linux/list.h>
 #include <linux/init.h>
-#include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/seq_file.h>
 #include <linux/sysctl.h>
@@ -1002,7 +1001,7 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
                ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
                nr_nodes--)
 
-#if defined(CONFIG_CMA) && defined(CONFIG_X86_64)
+#if defined(CONFIG_X86_64) && ((defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA))
 static void destroy_compound_gigantic_page(struct page *page,
                                        unsigned int order)
 {
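Runtime allocation and freeing of gigantic pages relies on alloc_contig_range(), which is available not only under CONFIG_CMA but also when both CONFIG_MEMORY_ISOLATION and CONFIG_COMPACTION are enabled, hence the widened guard. A minimal sketch of how the condition could be factored out (the macro name is hypothetical, not part of this patch):

/* Hypothetical convenience macro mirroring the #if above; illustration only. */
#if defined(CONFIG_X86_64) && \
	((defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || \
	 defined(CONFIG_CMA))
#define HUGETLB_RUNTIME_GIGANTIC_PAGES 1
#else
#define HUGETLB_RUNTIME_GIGANTIC_PAGES 0
#endif
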
@@ -1215,8 +1214,8 @@ void free_huge_page(struct page *page)
 
        set_page_private(page, 0);
        page->mapping = NULL;
-       BUG_ON(page_count(page));
-       BUG_ON(page_mapcount(page));
+       VM_BUG_ON_PAGE(page_count(page), page);
+       VM_BUG_ON_PAGE(page_mapcount(page), page);
        restore_reserve = PagePrivate(page);
        ClearPagePrivate(page);
 
@@ -1268,8 +1267,8 @@ static void prep_compound_gigantic_page(struct page *page, unsigned int order)
 
        /* we rely on prep_new_huge_page to set the destructor */
        set_compound_order(page, order);
-       __SetPageHead(page);
        __ClearPageReserved(page);
+       __SetPageHead(page);
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                /*
                 * For gigantic hugepages allocated through bootmem at
@@ -1287,6 +1286,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned int order)
                set_page_count(p, 0);
                set_compound_head(p, page);
        }
+       atomic_set(compound_mapcount_ptr(page), -1);
 }
 
 /*
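With the compound-page mapcount now kept in a dedicated field in the first tail page, gigantic pages have to initialise it just as prep_compound_page() does for regular compound pages; like page->_mapcount it holds -1 while the page is unmapped. A hedged sketch of reading it back (the helper name below is made up for illustration; compound_mapcount_ptr() is the real accessor):

static inline int head_page_mapcount(struct page *head)
{
	/* the stored value is biased by -1, like page->_mapcount */
	return atomic_read(compound_mapcount_ptr(head)) + 1;
}
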
@@ -2549,25 +2549,6 @@ static void hugetlb_unregister_node(struct node *node)
        nhs->hugepages_kobj = NULL;
 }
 
-/*
- * hugetlb module exit:  unregister hstate attributes from node devices
- * that have them.
- */
-static void hugetlb_unregister_all_nodes(void)
-{
-       int nid;
-
-       /*
-        * disable node device registrations.
-        */
-       register_hugetlbfs_with_node(NULL, NULL);
-
-       /*
-        * remove hstate attributes from any nodes that have them.
-        */
-       for (nid = 0; nid < nr_node_ids; nid++)
-               hugetlb_unregister_node(node_devices[nid]);
-}
 
 /*
  * Register hstate attributes for a single node device.
@@ -2632,27 +2613,10 @@ static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
        return NULL;
 }
 
-static void hugetlb_unregister_all_nodes(void) { }
-
 static void hugetlb_register_all_nodes(void) { }
 
 #endif
 
-static void __exit hugetlb_exit(void)
-{
-       struct hstate *h;
-
-       hugetlb_unregister_all_nodes();
-
-       for_each_hstate(h) {
-               kobject_put(hstate_kobjs[hstate_index(h)]);
-       }
-
-       kobject_put(hugepages_kobj);
-       kfree(hugetlb_fault_mutex_table);
-}
-module_exit(hugetlb_exit);
-
 static int __init hugetlb_init(void)
 {
        int i;
@@ -2690,7 +2654,7 @@ static int __init hugetlb_init(void)
                mutex_init(&hugetlb_fault_mutex_table[i]);
        return 0;
 }
-module_init(hugetlb_init);
+subsys_initcall(hugetlb_init);
 
 /* Should be called on processing a hugepagesz=... option */
 void __init hugetlb_add_hstate(unsigned int order)
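With the linux/module.h include, hugetlb_exit() and module_exit() gone, hugetlb is built-in only, so the node-attribute teardown removed above was dead code anyway. Replacing module_init() with subsys_initcall() also moves hugetlb_init() earlier in boot: for built-in code module_init() expands to device_initcall(), which runs at a later level. Rough ordering of the built-in initcall levels (see include/linux/init.h):

/* Built-in initcall levels, in the order they run (simplified): */
core_initcall(fn);	/* 1 */
postcore_initcall(fn);	/* 2 */
arch_initcall(fn);	/* 3 */
subsys_initcall(fn);	/* 4 - hugetlb_init() now runs here */
fs_initcall(fn);	/* 5 */
device_initcall(fn);	/* 6 - where module_init() lands for built-in code */
late_initcall(fn);	/* 7 */
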
@@ -3139,7 +3103,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                        entry = huge_ptep_get(src_pte);
                        ptepage = pte_page(entry);
                        get_page(ptepage);
-                       page_dup_rmap(ptepage);
+                       page_dup_rmap(ptepage, true);
                        set_huge_pte_at(dst, addr, dst_pte, entry);
                        hugetlb_count_add(pages_per_huge_page(h), dst);
                }
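page_dup_rmap() and page_remove_rmap() now take a compound flag. Hugetlb mappings are always compound, so every call site in this file passes true, which makes the rmap code adjust the compound_mapcount initialised above instead of the per-subpage _mapcount. Roughly, the dup side looks like this (simplified from include/linux/rmap.h):

static inline void page_dup_rmap(struct page *page, bool compound)
{
	/* compound mappings bump the shared compound_mapcount */
	atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
}
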
@@ -3223,7 +3187,7 @@ again:
                        set_page_dirty(page);
 
                hugetlb_count_sub(pages_per_huge_page(h), mm);
-               page_remove_rmap(page);
+               page_remove_rmap(page, true);
                force_flush = !__tlb_remove_page(tlb, page);
                if (force_flush) {
                        address += sz;
@@ -3452,7 +3416,7 @@ retry_avoidcopy:
                mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
                set_huge_pte_at(mm, address, ptep,
                                make_huge_pte(vma, new_page, 1));
-               page_remove_rmap(old_page);
+               page_remove_rmap(old_page, true);
                hugepage_add_new_anon_rmap(new_page, vma, address);
                /* Make the old page be freed below */
                new_page = old_page;
@@ -3622,7 +3586,7 @@ retry:
                ClearPagePrivate(page);
                hugepage_add_new_anon_rmap(page, vma, address);
        } else
-               page_dup_rmap(page);
+               page_dup_rmap(page, true);
        new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
                                && (vma->vm_flags & VM_SHARED)));
        set_huge_pte_at(mm, address, ptep, new_pte);
@@ -3902,7 +3866,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 same_page:
                if (pages) {
                        pages[i] = mem_map_offset(page, pfn_offset);
-                       get_page_foll(pages[i]);
+                       get_page(pages[i]);
                }
 
                if (vmas)
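get_page_foll() was a follow_page()-specific helper from the old tail-page refcounting scheme; now that references are always taken on the compound head, plain get_page() suffices. Roughly what get_page() does after the rework (simplified, field names approximate; see include/linux/mm.h):

static inline void get_page(struct page *page)
{
	/* all references are counted on the head page now */
	page = compound_head(page);
	atomic_inc(&page->_count);	/* simplified: the real code also asserts the count is positive */
}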