rmap: introduce rmap_walk_locked()

author		Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
		Thu, 17 Mar 2016 21:20:01 +0000 (14:20 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Thu, 17 Mar 2016 22:09:34 +0000 (15:09 -0700)
This patchset rewrites freeze_page() and unfreeze_page() using
try_to_unmap() and remove_migration_ptes().  The result is much simpler,
but somewhat slower.

Migrating 8GiB worth of PMD-mapped THP:

  Baseline 20.21 +/- 0.393
  Patched 20.73 +/- 0.082
  Slowdown 1.03x

It's 3% slower, compared to 14% in v1.  I don't think it should be a
stopper.

Splitting of PTE-mapped pages slowed down more, but this is not a common
case.

Migrating 8GiB worth of PTE-mapped THP:

  Baseline 20.39 +/- 0.225
  Patched 22.43 +/- 0.496
  Slowdown 1.10x

rmap_walk_locked() is the same as rmap_walk(), but the caller takes care
of the relevant rmap lock.
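
For anonymous pages that means the anon_vma lock; for file pages,
i_mmap_rwsem.  A minimal sketch of the anon case, assuming an already
initialized struct rmap_walk_control rwc (hypothetical caller; the
in-tree user arrives with the freeze_page()/unfreeze_page() rework):

	/* sketch: page is pinned, so the anon_vma can't go away */
	struct anon_vma *anon_vma = page_anon_vma(page);

	/* the caller, not the walker, takes the rmap lock */
	anon_vma_lock_read(anon_vma);
	rmap_walk_locked(page, &rwc);
	anon_vma_unlock_read(anon_vma);

A file-backed page would use the same pattern with
i_mmap_lock_read(mapping)/i_mmap_unlock_read(mapping) around the walk.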

This is preparation for switching THP splitting from custom rmap walk in
freeze_page()/unfreeze_page() to the generic one.

There is no support for KSM pages for now: it is not clear which lock is
implied.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index a07f42b..a5875e9 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -266,6 +266,7 @@ struct rmap_walk_control {
 };
 
 int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
+int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
 
 #else  /* !CONFIG_MMU */
 
diff --git a/mm/rmap.c b/mm/rmap.c
index 02f0bfc..30b739c 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1715,14 +1715,21 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
  * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
  * LOCKED.
  */
-static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
+static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
+               bool locked)
 {
        struct anon_vma *anon_vma;
        pgoff_t pgoff;
        struct anon_vma_chain *avc;
        int ret = SWAP_AGAIN;
 
-       anon_vma = rmap_walk_anon_lock(page, rwc);
+       if (locked) {
+               anon_vma = page_anon_vma(page);
+               /* anon_vma disappear under us? */
+               VM_BUG_ON_PAGE(!anon_vma, page);
+       } else {
+               anon_vma = rmap_walk_anon_lock(page, rwc);
+       }
        if (!anon_vma)
                return ret;
 
@@ -1742,7 +1749,9 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
                if (rwc->done && rwc->done(page))
                        break;
        }
-       anon_vma_unlock_read(anon_vma);
+
+       if (!locked)
+               anon_vma_unlock_read(anon_vma);
        return ret;
 }
 
@@ -1759,9 +1768,10 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
  * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
  * LOCKED.
  */
-static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
+static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
+               bool locked)
 {
-       struct address_space *mapping = page->mapping;
+       struct address_space *mapping = page_mapping(page);
        pgoff_t pgoff;
        struct vm_area_struct *vma;
        int ret = SWAP_AGAIN;
@@ -1778,7 +1788,8 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
                return ret;
 
        pgoff = page_to_pgoff(page);
-       i_mmap_lock_read(mapping);
+       if (!locked)
+               i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
                unsigned long address = vma_address(page, vma);
 
@@ -1795,7 +1806,8 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
        }
 
 done:
-       i_mmap_unlock_read(mapping);
+       if (!locked)
+               i_mmap_unlock_read(mapping);
        return ret;
 }
 
@@ -1804,9 +1816,20 @@ int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
        if (unlikely(PageKsm(page)))
                return rmap_walk_ksm(page, rwc);
        else if (PageAnon(page))
-               return rmap_walk_anon(page, rwc);
+               return rmap_walk_anon(page, rwc, false);
+       else
+               return rmap_walk_file(page, rwc, false);
+}
+
+/* Like rmap_walk, but caller holds relevant rmap lock */
+int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
+{
+       /* no ksm support for now */
+       VM_BUG_ON_PAGE(PageKsm(page), page);
+       if (PageAnon(page))
+               return rmap_walk_anon(page, rwc, true);
        else
-               return rmap_walk_file(page, rwc);
+               return rmap_walk_file(page, rwc, true);
 }
 
 #ifdef CONFIG_HUGETLB_PAGE