[cascardo/linux.git] mm/hugetlb.c
index 87e11d8..ec49d9e 100644
@@ -567,13 +567,13 @@ retry:
  * appear as a "reserved" entry instead of simply dangling with incorrect
  * counts.
  */
-void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve)
+void hugetlb_fix_reserve_counts(struct inode *inode)
 {
        struct hugepage_subpool *spool = subpool_inode(inode);
        long rsv_adjust;
 
        rsv_adjust = hugepage_subpool_get_pages(spool, 1);
-       if (restore_reserve && rsv_adjust) {
+       if (rsv_adjust) {
                struct hstate *h = hstate_inode(inode);
 
                hugetlb_acct_memory(h, 1);
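With the restore_reserve argument dropped, hugetlb_fix_reserve_counts() now adjusts the reserve count whenever the subpool accounting says an adjustment is needed. For context, the lone caller sits on the truncate error path in fs/hugetlbfs/inode.c; a minimal sketch of how that call site is expected to look after this change (reconstructed, so treat the exact arguments as assumptions):

	/*
	 * Sketch: in remove_inode_hugepages(), if removing the region
	 * entry for this page fails, fix up the subpool and reserve
	 * counts so the page appears as "reserved" instead of dangling.
	 */
	if (unlikely(hugetlb_unreserve_pages(inode, next, next + 1, 1)))
		hugetlb_fix_reserve_counts(inode);
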
@@ -1022,7 +1022,7 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
                ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
                nr_nodes--)
 
-#if (defined(CONFIG_X86_64) || defined(CONFIG_S390)) && \
+#if defined(CONFIG_ARCH_HAS_GIGANTIC_PAGE) && \
        ((defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || \
        defined(CONFIG_CMA))
 static void destroy_compound_gigantic_page(struct page *page,
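The compile-time guard now keys off a generic Kconfig symbol rather than an explicit list of architectures, so a new architecture enables runtime gigantic-page allocation by selecting ARCH_HAS_GIGANTIC_PAGE instead of patching mm/hugetlb.c. For reference, this #if block is paired with no-op fallbacks further down the file, roughly as below (reconstructed from the same era of the file, so treat the exact stubs as assumptions):

#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE || no isolation/compaction/CMA */
static inline bool gigantic_page_supported(void) { return false; }
static inline void free_gigantic_page(struct page *page, unsigned int order) { }
static inline void destroy_compound_gigantic_page(struct page *page,
						unsigned int order) { }
static inline int alloc_fresh_gigantic_page(struct hstate *h,
					nodemask_t *nodes_allowed) { return 0; }
#endif
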
@@ -1437,38 +1437,61 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
 
 /*
  * Dissolve a given free hugepage into free buddy pages. This function does
- * nothing for in-use (including surplus) hugepages.
+ * nothing for in-use (including surplus) hugepages. Returns -EBUSY if the
+ * number of free hugepages would be reduced below the number of reserved
+ * hugepages.
  */
-static void dissolve_free_huge_page(struct page *page)
+static int dissolve_free_huge_page(struct page *page)
 {
+       int rc = 0;
+
        spin_lock(&hugetlb_lock);
        if (PageHuge(page) && !page_count(page)) {
-               struct hstate *h = page_hstate(page);
-               int nid = page_to_nid(page);
-               list_del(&page->lru);
+               struct page *head = compound_head(page);
+               struct hstate *h = page_hstate(head);
+               int nid = page_to_nid(head);
+               if (h->free_huge_pages - h->resv_huge_pages == 0) {
+                       rc = -EBUSY;
+                       goto out;
+               }
+               list_del(&head->lru);
                h->free_huge_pages--;
                h->free_huge_pages_node[nid]--;
                h->max_huge_pages--;
-               update_and_free_page(h, page);
+               update_and_free_page(h, head);
        }
+out:
        spin_unlock(&hugetlb_lock);
+       return rc;
 }
 
 /*
  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
  * make specified memory blocks removable from the system.
- * Note that start_pfn should aligned with (minimum) hugepage size.
+ * Note that this will dissolve a free gigantic hugepage completely, if any
+ * part of it lies within the given range.
+ * Also note that if dissolve_free_huge_page() returns with an error, all
+ * free hugepages that were dissolved before that error are lost.
  */
-void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
+int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
 {
        unsigned long pfn;
+       struct page *page;
+       int rc = 0;
 
        if (!hugepages_supported())
-               return;
+               return rc;
+
+       for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
+               page = pfn_to_page(pfn);
+               if (PageHuge(page) && !page_count(page)) {
+                       rc = dissolve_free_huge_page(page);
+                       if (rc)
+                               break;
+               }
+       }
 
-       VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
-       for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
-               dissolve_free_huge_page(pfn_to_page(pfn));
+       return rc;
 }
 
 /*
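
Because dissolve_free_huge_pages() now returns an error instead of dissolving unconditionally, memory offlining can abort cleanly when freeing a hugepage would dip below the reserved pool. A sketch of the expected call site in __offline_pages() in mm/memory_hotplug.c (the label name is an assumption, not quoted from this patch):

	/*
	 * Dissolve any free hugepages in the range being offlined.
	 * -EBUSY means doing so would reduce free hugepages below the
	 * reserve, so back out of the offline operation.
	 */
	ret = dissolve_free_huge_pages(start_pfn, end_pfn);
	if (ret)
		goto failed_removal;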