mm, thp: adjust conditions when we can reuse the page on WP fault
[cascardo/linux.git] / mm / huge_memory.c
index 62fe06b..e45918d 100644 (file)
 #include <asm/pgalloc.h>
 #include "internal.h"
 
+/*
+ * Outcome codes for khugepaged scan/collapse attempts.  SCAN_FAIL and
+ * SCAN_SUCCEED are the generic results; the remaining values name the
+ * specific reason a PMD range could not be collapsed into a huge page.
+ * These are reported through the mm_collapse_huge_page* and
+ * mm_khugepaged_scan_pmd tracepoints instantiated below.
+ */
+enum scan_result {
+       SCAN_FAIL,
+       SCAN_SUCCEED,
+       SCAN_PMD_NULL,
+       SCAN_EXCEED_NONE_PTE,
+       SCAN_PTE_NON_PRESENT,
+       SCAN_PAGE_RO,
+       SCAN_NO_REFERENCED_PAGE,
+       SCAN_PAGE_NULL,
+       SCAN_SCAN_ABORT,
+       SCAN_PAGE_COUNT,
+       SCAN_PAGE_LRU,
+       SCAN_PAGE_LOCK,
+       SCAN_PAGE_ANON,
+       SCAN_ANY_PROCESS,
+       SCAN_VMA_NULL,
+       SCAN_VMA_CHECK,
+       SCAN_ADDRESS_RANGE,
+       SCAN_SWAP_CACHE_PAGE,
+       SCAN_DEL_PAGE_LRU,
+       SCAN_ALLOC_HUGE_PAGE_FAIL,
+       SCAN_CGROUP_CHARGE_FAIL
+};
+
+/* Instantiate the tracepoints declared in <trace/events/huge_memory.h>. */
+#define CREATE_TRACE_POINTS
+#include <trace/events/huge_memory.h>
+
 /*
  * By default transparent hugepage support is disabled in order that avoid
  * to risk increase the memory footprint of applications without a guaranteed
@@ -724,7 +751,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 
        VM_BUG_ON_PAGE(!PageCompound(page), page);
 
-       if (mem_cgroup_try_charge(page, mm, gfp, &memcg)) {
+       if (mem_cgroup_try_charge(page, mm, gfp, &memcg, true)) {
                put_page(page);
                count_vm_event(THP_FAULT_FALLBACK);
                return VM_FAULT_FALLBACK;
@@ -732,7 +759,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 
        pgtable = pte_alloc_one(mm, haddr);
        if (unlikely(!pgtable)) {
-               mem_cgroup_cancel_charge(page, memcg);
+               mem_cgroup_cancel_charge(page, memcg, true);
                put_page(page);
                return VM_FAULT_OOM;
        }
@@ -748,7 +775,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
        ptl = pmd_lock(mm, pmd);
        if (unlikely(!pmd_none(*pmd))) {
                spin_unlock(ptl);
-               mem_cgroup_cancel_charge(page, memcg);
+               mem_cgroup_cancel_charge(page, memcg, true);
                put_page(page);
                pte_free(mm, pgtable);
        } else {
@@ -759,7 +786,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
                        int ret;
 
                        spin_unlock(ptl);
-                       mem_cgroup_cancel_charge(page, memcg);
+                       mem_cgroup_cancel_charge(page, memcg, true);
                        put_page(page);
                        pte_free(mm, pgtable);
                        ret = handle_userfault(vma, address, flags,
@@ -770,8 +797,8 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 
                entry = mk_huge_pmd(page, vma->vm_page_prot);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-               page_add_new_anon_rmap(page, vma, haddr);
-               mem_cgroup_commit_charge(page, memcg, false);
+               page_add_new_anon_rmap(page, vma, haddr, true);
+               mem_cgroup_commit_charge(page, memcg, false, true);
                lru_cache_add_active_or_unevictable(page, vma);
                pgtable_trans_huge_deposit(mm, pmd, pgtable);
                set_pmd_at(mm, haddr, pmd, entry);
@@ -1068,13 +1095,14 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
                                               vma, address, page_to_nid(page));
                if (unlikely(!pages[i] ||
                             mem_cgroup_try_charge(pages[i], mm, GFP_KERNEL,
-                                                  &memcg))) {
+                                                  &memcg, false))) {
                        if (pages[i])
                                put_page(pages[i]);
                        while (--i >= 0) {
                                memcg = (void *)page_private(pages[i]);
                                set_page_private(pages[i], 0);
-                               mem_cgroup_cancel_charge(pages[i], memcg);
+                               mem_cgroup_cancel_charge(pages[i], memcg,
+                                               false);
                                put_page(pages[i]);
                        }
                        kfree(pages);
@@ -1112,8 +1140,8 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
                memcg = (void *)page_private(pages[i]);
                set_page_private(pages[i], 0);
-               page_add_new_anon_rmap(pages[i], vma, haddr);
-               mem_cgroup_commit_charge(pages[i], memcg, false);
+               page_add_new_anon_rmap(pages[i], vma, haddr, false);
+               mem_cgroup_commit_charge(pages[i], memcg, false, false);
                lru_cache_add_active_or_unevictable(pages[i], vma);
                pte = pte_offset_map(&_pmd, haddr);
                VM_BUG_ON(!pte_none(*pte));
@@ -1124,7 +1152,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 
        smp_wmb(); /* make pte visible before pmd */
        pmd_populate(mm, pmd, pgtable);
-       page_remove_rmap(page);
+       page_remove_rmap(page, true);
        spin_unlock(ptl);
 
        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
@@ -1141,7 +1169,7 @@ out_free_pages:
        for (i = 0; i < HPAGE_PMD_NR; i++) {
                memcg = (void *)page_private(pages[i]);
                set_page_private(pages[i], 0);
-               mem_cgroup_cancel_charge(pages[i], memcg);
+               mem_cgroup_cancel_charge(pages[i], memcg, false);
                put_page(pages[i]);
        }
        kfree(pages);
@@ -1171,7 +1199,17 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
        page = pmd_page(orig_pmd);
        VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
+       /*
+        * We can only reuse the page if nobody else maps the huge page or
+        * its part.  We could do it by checking page_mapcount() on each
+        * sub-page, but that is expensive.
+        * The cheaper way is to check that page_count() equals 1: every
+        * mapcount takes a page reference, so this way we can guarantee
+        * that the PMD is the only mapping.
+        * This can give a false negative if somebody pinned the page, but
+        * that's fine.
+        */
+       if (page_mapcount(page) == 1 && page_count(page) == 1) {
                pmd_t entry;
                entry = pmd_mkyoung(orig_pmd);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
@@ -1207,7 +1245,8 @@ alloc:
                goto out;
        }
 
-       if (unlikely(mem_cgroup_try_charge(new_page, mm, huge_gfp, &memcg))) {
+       if (unlikely(mem_cgroup_try_charge(new_page, mm, huge_gfp, &memcg,
+                                          true))) {
                put_page(new_page);
                if (page) {
                        split_huge_page(page);
@@ -1236,7 +1275,7 @@ alloc:
                put_user_huge_page(page);
        if (unlikely(!pmd_same(*pmd, orig_pmd))) {
                spin_unlock(ptl);
-               mem_cgroup_cancel_charge(new_page, memcg);
+               mem_cgroup_cancel_charge(new_page, memcg, true);
                put_page(new_page);
                goto out_mn;
        } else {
@@ -1244,8 +1283,8 @@ alloc:
                entry = mk_huge_pmd(new_page, vma->vm_page_prot);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                pmdp_huge_clear_flush_notify(vma, haddr, pmd);
-               page_add_new_anon_rmap(new_page, vma, haddr);
-               mem_cgroup_commit_charge(new_page, memcg, false);
+               page_add_new_anon_rmap(new_page, vma, haddr, true);
+               mem_cgroup_commit_charge(new_page, memcg, false, true);
                lru_cache_add_active_or_unevictable(new_page, vma);
                set_pmd_at(mm, haddr, pmd, entry);
                update_mmu_cache_pmd(vma, address, pmd);
@@ -1254,7 +1293,7 @@ alloc:
                        put_huge_zero_page();
                } else {
                        VM_BUG_ON_PAGE(!PageHead(page), page);
-                       page_remove_rmap(page);
+                       page_remove_rmap(page, true);
                        put_page(page);
                }
                ret |= VM_FAULT_WRITE;
@@ -1481,7 +1520,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                put_huge_zero_page();
        } else {
                struct page *page = pmd_page(orig_pmd);
-               page_remove_rmap(page);
+               page_remove_rmap(page, true);
                VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
                add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
                VM_BUG_ON_PAGE(!PageHead(page), page);
@@ -1778,7 +1817,7 @@ static void __split_huge_page_refcount(struct page *page,
                */
                page_tail->_mapcount = page->_mapcount;
 
-               BUG_ON(page_tail->mapping);
+               BUG_ON(page_tail->mapping != TAIL_MAPPING);
                page_tail->mapping = page->mapping;
 
                page_tail->index = page->index + i;
@@ -2198,26 +2237,33 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                                        unsigned long address,
                                        pte_t *pte)
 {
-       struct page *page;
+       struct page *page = NULL;
        pte_t *_pte;
-       int none_or_zero = 0;
+       int none_or_zero = 0, result = 0;
        bool referenced = false, writable = false;
+
        for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
             _pte++, address += PAGE_SIZE) {
                pte_t pteval = *_pte;
                if (pte_none(pteval) || (pte_present(pteval) &&
                                is_zero_pfn(pte_pfn(pteval)))) {
                        if (!userfaultfd_armed(vma) &&
-                           ++none_or_zero <= khugepaged_max_ptes_none)
+                           ++none_or_zero <= khugepaged_max_ptes_none) {
                                continue;
-                       else
+                       } else {
+                               result = SCAN_EXCEED_NONE_PTE;
                                goto out;
+                       }
                }
-               if (!pte_present(pteval))
+               if (!pte_present(pteval)) {
+                       result = SCAN_PTE_NON_PRESENT;
                        goto out;
+               }
                page = vm_normal_page(vma, address, pteval);
-               if (unlikely(!page))
+               if (unlikely(!page)) {
+                       result = SCAN_PAGE_NULL;
                        goto out;
+               }
 
                VM_BUG_ON_PAGE(PageCompound(page), page);
                VM_BUG_ON_PAGE(!PageAnon(page), page);
@@ -2229,8 +2275,10 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                 * is needed to serialize against split_huge_page
                 * when invoked from the VM.
                 */
-               if (!trylock_page(page))
+               if (!trylock_page(page)) {
+                       result = SCAN_PAGE_LOCK;
                        goto out;
+               }
 
                /*
                 * cannot use mapcount: can't collapse if there's a gup pin.
@@ -2239,6 +2287,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                 */
                if (page_count(page) != 1 + !!PageSwapCache(page)) {
                        unlock_page(page);
+                       result = SCAN_PAGE_COUNT;
                        goto out;
                }
                if (pte_write(pteval)) {
@@ -2246,6 +2295,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                } else {
                        if (PageSwapCache(page) && !reuse_swap_page(page)) {
                                unlock_page(page);
+                               result = SCAN_SWAP_CACHE_PAGE;
                                goto out;
                        }
                        /*
@@ -2260,6 +2310,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                 */
                if (isolate_lru_page(page)) {
                        unlock_page(page);
+                       result = SCAN_DEL_PAGE_LRU;
                        goto out;
                }
                /* 0 stands for page_is_file_cache(page) == false */
@@ -2273,10 +2324,21 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                    mmu_notifier_test_young(vma->vm_mm, address))
                        referenced = true;
        }
-       if (likely(referenced && writable))
-               return 1;
+       if (likely(writable)) {
+               if (likely(referenced)) {
+                       result = SCAN_SUCCEED;
+                       trace_mm_collapse_huge_page_isolate(page_to_pfn(page), none_or_zero,
+                                                           referenced, writable, result);
+                       return 1;
+               }
+       } else {
+               result = SCAN_PAGE_RO;
+       }
+
 out:
        release_pte_pages(pte, _pte);
+       trace_mm_collapse_huge_page_isolate(page_to_pfn(page), none_or_zero,
+                                           referenced, writable, result);
        return 0;
 }
 
@@ -2321,7 +2383,7 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
                         * superfluous.
                         */
                        pte_clear(vma->vm_mm, address, _pte);
-                       page_remove_rmap(src_page);
+                       page_remove_rmap(src_page, false);
                        spin_unlock(ptl);
                        free_page_and_swap_cache(src_page);
                }
@@ -2513,7 +2575,7 @@ static void collapse_huge_page(struct mm_struct *mm,
        pgtable_t pgtable;
        struct page *new_page;
        spinlock_t *pmd_ptl, *pte_ptl;
-       int isolated;
+       int isolated = 0, result = 0;   /* both traced on early-exit paths */
        unsigned long hstart, hend;
        struct mem_cgroup *memcg;
        unsigned long mmun_start;       /* For mmu_notifiers */
@@ -2528,12 +2590,15 @@ static void collapse_huge_page(struct mm_struct *mm,
 
        /* release the mmap_sem read lock. */
        new_page = khugepaged_alloc_page(hpage, gfp, mm, address, node);
-       if (!new_page)
-               return;
+       if (!new_page) {
+               result = SCAN_ALLOC_HUGE_PAGE_FAIL;
+               goto out_nolock;
+       }
 
-       if (unlikely(mem_cgroup_try_charge(new_page, mm,
-                                          gfp, &memcg)))
-               return;
+       if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
+               result = SCAN_CGROUP_CHARGE_FAIL;
+               goto out_nolock;
+       }
 
        /*
         * Prevent all access to pagetables with the exception of
@@ -2541,21 +2606,31 @@ static void collapse_huge_page(struct mm_struct *mm,
         * handled by the anon_vma lock + PG_lock.
         */
        down_write(&mm->mmap_sem);
-       if (unlikely(khugepaged_test_exit(mm)))
+       if (unlikely(khugepaged_test_exit(mm))) {
+               result = SCAN_ANY_PROCESS;
                goto out;
+       }
 
        vma = find_vma(mm, address);
-       if (!vma)
+       if (!vma) {
+               result = SCAN_VMA_NULL;
                goto out;
+       }
        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
        hend = vma->vm_end & HPAGE_PMD_MASK;
-       if (address < hstart || address + HPAGE_PMD_SIZE > hend)
+       if (address < hstart || address + HPAGE_PMD_SIZE > hend) {
+               result = SCAN_ADDRESS_RANGE;
                goto out;
-       if (!hugepage_vma_check(vma))
+       }
+       if (!hugepage_vma_check(vma)) {
+               result = SCAN_VMA_CHECK;
                goto out;
+       }
        pmd = mm_find_pmd(mm, address);
-       if (!pmd)
+       if (!pmd) {
+               result = SCAN_PMD_NULL;
                goto out;
+       }
 
        anon_vma_lock_write(vma->anon_vma);
 
@@ -2592,6 +2667,7 @@ static void collapse_huge_page(struct mm_struct *mm,
                pmd_populate(mm, pmd, pmd_pgtable(_pmd));
                spin_unlock(pmd_ptl);
                anon_vma_unlock_write(vma->anon_vma);
+               result = SCAN_FAIL;
                goto out;
        }
 
@@ -2618,8 +2694,8 @@ static void collapse_huge_page(struct mm_struct *mm,
 
        spin_lock(pmd_ptl);
        BUG_ON(!pmd_none(*pmd));
-       page_add_new_anon_rmap(new_page, vma, address);
-       mem_cgroup_commit_charge(new_page, memcg, false);
+       page_add_new_anon_rmap(new_page, vma, address, true);
+       mem_cgroup_commit_charge(new_page, memcg, false, true);
        lru_cache_add_active_or_unevictable(new_page, vma);
        pgtable_trans_huge_deposit(mm, pmd, pgtable);
        set_pmd_at(mm, address, pmd, _pmd);
@@ -2629,12 +2705,17 @@ static void collapse_huge_page(struct mm_struct *mm,
        *hpage = NULL;
 
        khugepaged_pages_collapsed++;
+       result = SCAN_SUCCEED;
 out_up_write:
        up_write(&mm->mmap_sem);
+       trace_mm_collapse_huge_page(mm, isolated, result);
        return;
 
+out_nolock:
+       trace_mm_collapse_huge_page(mm, isolated, result);
+       return;
 out:
-       mem_cgroup_cancel_charge(new_page, memcg);
+       mem_cgroup_cancel_charge(new_page, memcg, true);
        goto out_up_write;
 }
 
@@ -2645,8 +2726,8 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
 {
        pmd_t *pmd;
        pte_t *pte, *_pte;
-       int ret = 0, none_or_zero = 0;
-       struct page *page;
+       int ret = 0, none_or_zero = 0, result = 0;
+       struct page *page = NULL;
        unsigned long _address;
        spinlock_t *ptl;
        int node = NUMA_NO_NODE;
@@ -2655,8 +2736,10 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
        pmd = mm_find_pmd(mm, address);
-       if (!pmd)
+       if (!pmd) {
+               result = SCAN_PMD_NULL;
                goto out;
+       }
 
        memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
        pte = pte_offset_map_lock(mm, pmd, address, &ptl);
@@ -2665,19 +2748,25 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
                pte_t pteval = *_pte;
                if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
                        if (!userfaultfd_armed(vma) &&
-                           ++none_or_zero <= khugepaged_max_ptes_none)
+                           ++none_or_zero <= khugepaged_max_ptes_none) {
                                continue;
-                       else
+                       } else {
+                               result = SCAN_EXCEED_NONE_PTE;
                                goto out_unmap;
+                       }
                }
-               if (!pte_present(pteval))
+               if (!pte_present(pteval)) {
+                       result = SCAN_PTE_NON_PRESENT;
                        goto out_unmap;
+               }
                if (pte_write(pteval))
                        writable = true;
 
                page = vm_normal_page(vma, _address, pteval);
-               if (unlikely(!page))
+               if (unlikely(!page)) {
+                       result = SCAN_PAGE_NULL;
                        goto out_unmap;
+               }
                /*
                 * Record which node the original page is from and save this
                 * information to khugepaged_node_load[].
@@ -2685,26 +2774,49 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
                 * hit record.
                 */
                node = page_to_nid(page);
-               if (khugepaged_scan_abort(node))
+               if (khugepaged_scan_abort(node)) {
+                       result = SCAN_SCAN_ABORT;
                        goto out_unmap;
+               }
                khugepaged_node_load[node]++;
                VM_BUG_ON_PAGE(PageCompound(page), page);
-               if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
+               if (!PageLRU(page)) {
+                       result = SCAN_SCAN_ABORT;
                        goto out_unmap;
+               }
+               if (PageLocked(page)) {
+                       result = SCAN_PAGE_LOCK;
+                       goto out_unmap;
+               }
+               if (!PageAnon(page)) {
+                       result = SCAN_PAGE_ANON;
+                       goto out_unmap;
+               }
+
                /*
                 * cannot use mapcount: can't collapse if there's a gup pin.
                 * The page must only be referenced by the scanned process
                 * and page swap cache.
                 */
-               if (page_count(page) != 1 + !!PageSwapCache(page))
+               if (page_count(page) != 1 + !!PageSwapCache(page)) {
+                       result = SCAN_PAGE_COUNT;
                        goto out_unmap;
+               }
                if (pte_young(pteval) ||
                    page_is_young(page) || PageReferenced(page) ||
                    mmu_notifier_test_young(vma->vm_mm, address))
                        referenced = true;
        }
-       if (referenced && writable)
-               ret = 1;
+       if (writable) {
+               if (referenced) {
+                       result = SCAN_SUCCEED;
+                       ret = 1;
+               } else {
+                       result = SCAN_NO_REFERENCED_PAGE;
+               }
+       } else {
+               result = SCAN_PAGE_RO;
+       }
 out_unmap:
        pte_unmap_unlock(pte, ptl);
        if (ret) {
@@ -2713,6 +2825,9 @@ out_unmap:
                collapse_huge_page(mm, address, hpage, vma, node);
        }
 out:
+       /* page is NULL if we never looked up a pte (e.g. pmd was missing) */
+       trace_mm_khugepaged_scan_pmd(mm, page ? page_to_pfn(page) : 0, writable,
+                                    referenced, none_or_zero, result);
        return ret;
 }