mm/memory-failure: introduce get_hwpoison_page() for consistent refcount handling
author	Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
	Wed, 24 Jun 2015 23:56:48 +0000 (16:56 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Thu, 25 Jun 2015 00:49:42 +0000 (17:49 -0700)
memory_failure() can run in two different modes (selected by
MF_COUNT_INCREASED) from a page refcount perspective.  When
MF_COUNT_INCREASED is set, memory_failure() assumes that the caller has
already taken a refcount on the target page.  When it is cleared,
memory_failure() takes the refcount on its own.
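
For illustration, here is a minimal sketch of the two calling
conventions; the wrapper names are hypothetical and not part of this
patch:

#include <linux/mm.h>

/*
 * Hypothetical caller holding no reference: memory_failure() pins the
 * page itself (MF_COUNT_INCREASED cleared).
 */
static int report_hwpoison_unreferenced(unsigned long pfn, int trapno)
{
	return memory_failure(pfn, trapno, 0);
}

/*
 * Hypothetical caller that already pinned the page (e.g. via
 * get_user_pages_fast()): pass MF_COUNT_INCREASED so memory_failure()
 * reuses that reference instead of taking its own.
 */
static int report_hwpoison_referenced(struct page *page, int trapno)
{
	return memory_failure(page_to_pfn(page), trapno, MF_COUNT_INCREASED);
}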

In the current code, however, refcounting is done differently in each
caller: for example, madvise_hwpoison() uses get_user_pages_fast() while
hwpoison_inject() uses get_page_unless_zero().  This inconsistent
refcounting causes refcount failures, especially for thp tail pages.
Typical user-visible effects are memory leaks or
VM_BUG_ON_PAGE(!page_count(page)) in isolate_lru_page().

To fix this refcounting issue, this patch introduces get_hwpoison_page(),
which handles thp tail pages in the same manner for every caller of the
hwpoison code.
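
As the hunks below show, callers take the reference on the raw error
page with get_hwpoison_page() and drop it with put_page() on that same
page.  A sketch of the intended pattern (the wrapper name is made up for
illustration):

#include <linux/mm.h>

/*
 * Illustrative wrapper only: pin the raw error page via
 * get_hwpoison_page(), which internally handles hugetlb pages and thp
 * head/tail pages, then report the error with MF_COUNT_INCREASED since
 * the reference is already held.
 */
static int report_poisoned_page(struct page *p, int trapno)
{
	if (!get_hwpoison_page(p))
		return 0;	/* e.g. a free buddy page: nothing to pin */

	return memory_failure(page_to_pfn(p), trapno, MF_COUNT_INCREASED);
}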

memory_failure() might fail to split a thp, and in that case it returns
without completing page isolation.  This is not good because PageHWPoison
is still set on the thp and there is no easy way to unpoison such thps.
So this patch rolls back any action taken on the thp in the "non
anonymous thp" and "thp split failed" cases, expecting that an MCE (SRAR)
generated by a later access will properly free such thps.

[akpm@linux-foundation.org: fix CONFIG_HWPOISON_INJECT=m]
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mm.h
mm/hwpoison-inject.c
mm/memory-failure.c

index 8b08607..cd9df66 100644
@@ -2146,6 +2146,7 @@ enum mf_flags {
 extern int memory_failure(unsigned long pfn, int trapno, int flags);
 extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
 extern int unpoison_memory(unsigned long pfn);
+extern int get_hwpoison_page(struct page *page);
 extern int sysctl_memory_failure_early_kill;
 extern int sysctl_memory_failure_recovery;
 extern void shake_page(struct page *p, int access);
index 4ca5fe0..bf73ac1 100644
@@ -28,7 +28,7 @@ static int hwpoison_inject(void *data, u64 val)
        /*
         * This implies unable to support free buddy pages.
         */
-       if (!get_page_unless_zero(hpage))
+       if (!get_hwpoison_page(p))
                return 0;
 
        if (!hwpoison_filter_enable)
@@ -58,7 +58,7 @@ inject:
        pr_info("Injecting memory failure at pfn %#lx\n", pfn);
        return memory_failure(pfn, 18, MF_COUNT_INCREASED);
 put_out:
-       put_page(hpage);
+       put_page(p);
        return 0;
 }
 
index 17a8e3b..a810ab1 100644
@@ -915,6 +915,39 @@ static int page_action(struct page_state *ps, struct page *p,
        return (result == RECOVERED || result == DELAYED) ? 0 : -EBUSY;
 }
 
+/**
+ * get_hwpoison_page() - Get refcount for memory error handling:
+ * @page:      raw error page (hit by memory error)
+ *
+ * Return: return 0 if failed to grab the refcount, otherwise true (some
+ * non-zero value.)
+ */
+int get_hwpoison_page(struct page *page)
+{
+       struct page *head = compound_head(page);
+
+       if (PageHuge(head))
+               return get_page_unless_zero(head);
+
+       /*
+        * Thp tail page has special refcounting rule (refcount of tail pages
+        * is stored in ->_mapcount,) so we can't call get_page_unless_zero()
+        * directly for tail pages.
+        */
+       if (PageTransHuge(head)) {
+               if (get_page_unless_zero(head)) {
+                       if (PageTail(page))
+                               get_page(page);
+                       return 1;
+               } else {
+                       return 0;
+               }
+       }
+
+       return get_page_unless_zero(page);
+}
+EXPORT_SYMBOL_GPL(get_hwpoison_page);
+
 /*
  * Do all that is necessary to remove user space mappings. Unmap
  * the pages and send SIGBUS to the processes if the data was dirty.
@@ -1097,8 +1130,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
         * In fact it's dangerous to directly bump up page count from 0,
         * that may make page_freeze_refs()/page_unfreeze_refs() mismatch.
         */
-       if (!(flags & MF_COUNT_INCREASED) &&
-               !get_page_unless_zero(hpage)) {
+       if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p)) {
                if (is_free_buddy_page(p)) {
                        action_result(pfn, MSG_BUDDY, DELAYED);
                        return 0;
@@ -1130,12 +1162,20 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
        if (!PageHuge(p) && PageTransHuge(hpage)) {
                if (!PageAnon(hpage)) {
                        pr_err("MCE: %#lx: non anonymous thp\n", pfn);
+                       if (TestClearPageHWPoison(p))
+                               atomic_long_sub(nr_pages, &num_poisoned_pages);
                        put_page(p);
+                       if (p != hpage)
+                               put_page(hpage);
                        return -EBUSY;
                }
                if (unlikely(split_huge_page(hpage))) {
                        pr_err("MCE: %#lx: thp split failed\n", pfn);
+                       if (TestClearPageHWPoison(p))
+                               atomic_long_sub(nr_pages, &num_poisoned_pages);
                        put_page(p);
+                       if (p != hpage)
+                               put_page(hpage);
                        return -EBUSY;
                }
                VM_BUG_ON_PAGE(!page_count(p), p);
@@ -1413,12 +1453,12 @@ int unpoison_memory(unsigned long pfn)
         */
        if (!PageHuge(page) && PageTransHuge(page)) {
                pr_info("MCE: Memory failure is now running on %#lx\n", pfn);
-                       return 0;
+               return 0;
        }
 
        nr_pages = 1 << compound_order(page);
 
-       if (!get_page_unless_zero(page)) {
+       if (!get_hwpoison_page(p)) {
                /*
                 * Since HWPoisoned hugepage should have non-zero refcount,
                 * race between memory failure and unpoison seems to happen.
@@ -1486,7 +1526,7 @@ static int __get_any_page(struct page *p, unsigned long pfn, int flags)
         * When the target page is a free hugepage, just remove it
         * from free hugepage list.
         */
-       if (!get_page_unless_zero(compound_head(p))) {
+       if (!get_hwpoison_page(p)) {
                if (PageHuge(p)) {
                        pr_info("%s: %#lx free huge page\n", __func__, pfn);
                        ret = 0;