diff --git a/mm/migrate.c b/mm/migrate.c
index be6dbf9..f78ec9b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -780,6 +780,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
        if (rc != MIGRATEPAGE_SUCCESS) {
                newpage->mapping = NULL;
        } else {
+               mem_cgroup_migrate(page, newpage, false);
                if (remap_swapcache)
                        remove_migration_ptes(page, newpage);
                page->mapping = NULL;
@@ -795,7 +796,6 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 {
        int rc = -EAGAIN;
        int remap_swapcache = 1;
-       struct mem_cgroup *mem;
        struct anon_vma *anon_vma = NULL;
 
        if (!trylock_page(page)) {
@@ -821,9 +821,6 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
                lock_page(page);
        }
 
-       /* charge against new page */
-       mem_cgroup_prepare_migration(page, newpage, &mem);
-
        if (PageWriteback(page)) {
                /*
                 * Only in the case of a full synchronous migration is it
@@ -833,10 +830,10 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
                 */
                if (mode != MIGRATE_SYNC) {
                        rc = -EBUSY;
-                       goto uncharge;
+                       goto out_unlock;
                }
                if (!force)
-                       goto uncharge;
+                       goto out_unlock;
                wait_on_page_writeback(page);
        }
        /*
@@ -872,7 +869,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
                         */
                        remap_swapcache = 0;
                } else {
-                       goto uncharge;
+                       goto out_unlock;
                }
        }
 
@@ -885,7 +882,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
                 * the page migration right away (protected by page lock).
                 */
                rc = balloon_page_migrate(newpage, page, mode);
-               goto uncharge;
+               goto out_unlock;
        }
 
        /*
@@ -904,7 +901,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
                VM_BUG_ON_PAGE(PageAnon(page), page);
                if (page_has_private(page)) {
                        try_to_free_buffers(page);
-                       goto uncharge;
+                       goto out_unlock;
                }
                goto skip_unmap;
        }
@@ -923,10 +920,7 @@ skip_unmap:
        if (anon_vma)
                put_anon_vma(anon_vma);
 
-uncharge:
-       mem_cgroup_end_migration(mem, page, newpage,
-                                (rc == MIGRATEPAGE_SUCCESS ||
-                                 rc == MIGRATEPAGE_BALLOON_SUCCESS));
+out_unlock:
        unlock_page(page);
 out:
        return rc;
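
Taken together, the __unmap_and_move() hunks above swap the old open/commit
charge transaction for a single transfer call: mem_cgroup_prepare_migration()
no longer brackets the unmap, mem_cgroup_migrate() runs in move_to_new_page()
once the copy has succeeded, and the uncharge: label shrinks to out_unlock:
because only the page unlock remains on the error paths. A minimal sketch of
the before/after control flow, where do_move() is a hypothetical stand-in for
the actual unmap-and-copy work (the memcg calls are the interfaces visible in
the hunks):

/* Before: a charge transaction brackets the whole migration attempt. */
static int move_old(struct page *page, struct page *newpage,
		    enum migrate_mode mode)
{
	struct mem_cgroup *mem;
	int rc;

	mem_cgroup_prepare_migration(page, newpage, &mem);	/* open */
	rc = do_move(page, newpage, mode);			/* hypothetical */
	mem_cgroup_end_migration(mem, page, newpage,		/* commit or */
				 rc == MIGRATEPAGE_SUCCESS);	/* roll back
								   (balloon
								   success case
								   omitted) */
	return rc;
}

/* After: nothing to unwind; transfer the charge only on known success. */
static int move_new(struct page *page, struct page *newpage,
		    enum migrate_mode mode)
{
	int rc = do_move(page, newpage, mode);			/* hypothetical */

	if (rc == MIGRATEPAGE_SUCCESS)
		mem_cgroup_migrate(page, newpage, false);
	return rc;
}
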
@@ -1786,7 +1780,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
        pg_data_t *pgdat = NODE_DATA(node);
        int isolated = 0;
        struct page *new_page = NULL;
-       struct mem_cgroup *memcg = NULL;
        int page_lru = page_is_file_cache(page);
        unsigned long mmun_start = address & HPAGE_PMD_MASK;
        unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
@@ -1852,15 +1845,6 @@ fail_putback:
                goto out_unlock;
        }
 
-       /*
-        * Traditional migration needs to prepare the memcg charge
-        * transaction early to prevent the old page from being
-        * uncharged when installing migration entries.  Here we can
-        * save the potential rollback and start the charge transfer
-        * only when migration is already known to end successfully.
-        */
-       mem_cgroup_prepare_migration(page, new_page, &memcg);
-
        orig_entry = *pmd;
        entry = mk_pmd(new_page, vma->vm_page_prot);
        entry = pmd_mkhuge(entry);
@@ -1888,14 +1872,10 @@ fail_putback:
                goto fail_putback;
        }
 
+       mem_cgroup_migrate(page, new_page, false);
+
        page_remove_rmap(page);
 
-       /*
-        * Finish the charge transaction under the page table lock to
-        * prevent split_huge_page() from dividing up the charge
-        * before it's fully transferred to the new page.
-        */
-       mem_cgroup_end_migration(memcg, page, new_page, true);
        spin_unlock(ptl);
        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
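
On the transhuge path, the two removed comments carried the reasoning: the
charge transaction had to be opened early so that installing migration entries
would not uncharge the old page, and committed under the page table lock so
that split_huge_page() could not divide the charge mid-transfer. With the
single-call API, both concerns collapse into placing mem_cgroup_migrate()
inside the ptl section, as the last hunk does. A condensed sketch of that
ordering (surrounding setup elided; names as in the hunk, the comment is an
assumption carried over from the removed text):

	spin_lock(ptl);
	/* ... new huge pmd installed, fail_putback handled above ... */

	/*
	 * Transfer the charge while still holding the page table lock;
	 * presumably the split_huge_page() race the removed comment
	 * described is what keeps this call inside the critical section.
	 */
	mem_cgroup_migrate(page, new_page, false);

	page_remove_rmap(page);

	spin_unlock(ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
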