mm: rmap: split out page_remove_file_rmap()
author Johannes Weiner <hannes@cmpxchg.org>
Wed, 29 Oct 2014 21:50:51 +0000 (14:50 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 29 Oct 2014 23:33:15 +0000 (16:33 -0700)
page_remove_rmap() has too many branches on PageAnon() and is hard to
follow.  Move the file part into a separate function.

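For illustration, the control flow after the split reduces to roughly the
sketch below (condensed from the diff that follows; the mapcount checks and
statistics updates are elided):

	static void page_remove_file_rmap(struct page *page)
	{
		struct mem_cgroup *memcg;
		unsigned long flags;
		bool locked;

		/* The memcg page_stat lock is only taken on the file side. */
		memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
		/* ... drop the mapcount, update NR_FILE_MAPPED, mlock ... */
		mem_cgroup_end_page_stat(memcg, locked, flags);
	}

	void page_remove_rmap(struct page *page)
	{
		if (!PageAnon(page)) {
			page_remove_file_rmap(page);
			return;
		}
		/* ... anon-only accounting, no memcg page_stat lock ... */
	}

One upshot, visible in the diff, is that page_remove_rmap() no longer needs
the uninitialized_var(memcg) workaround or the trailing "out:" label: the
memcg begin/end pair lives entirely in the file helper.
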
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: Vladimir Davydov <vdavydov@parallels.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/mm/rmap.c b/mm/rmap.c
index f574046..19886fb 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1054,6 +1054,36 @@ void page_add_file_rmap(struct page *page)
        mem_cgroup_end_page_stat(memcg, locked, flags);
 }
 
+static void page_remove_file_rmap(struct page *page)
+{
+       struct mem_cgroup *memcg;
+       unsigned long flags;
+       bool locked;
+
+       memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
+
+       /* page still mapped by someone else? */
+       if (!atomic_add_negative(-1, &page->_mapcount))
+               goto out;
+
+       /* Hugepages are not counted in NR_FILE_MAPPED for now. */
+       if (unlikely(PageHuge(page)))
+               goto out;
+
+       /*
+        * We use the irq-unsafe __{inc|mod}_zone_page_stat because
+        * these counters are not modified in interrupt context, and
+        * pte lock(a spinlock) is held, which implies preemption disabled.
+        */
+       __dec_zone_page_state(page, NR_FILE_MAPPED);
+       mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
+
+       if (unlikely(PageMlocked(page)))
+               clear_page_mlock(page);
+out:
+       mem_cgroup_end_page_stat(memcg, locked, flags);
+}
+
 /**
  * page_remove_rmap - take down pte mapping from a page
  * @page: page to remove mapping from
@@ -1062,46 +1092,33 @@ void page_add_file_rmap(struct page *page)
  */
 void page_remove_rmap(struct page *page)
 {
-       struct mem_cgroup *uninitialized_var(memcg);
-       bool anon = PageAnon(page);
-       unsigned long flags;
-       bool locked;
-
-       /*
-        * The anon case has no mem_cgroup page_stat to update; but may
-        * uncharge_page() below, where the lock ordering can deadlock if
-        * we hold the lock against page_stat move: so avoid it on anon.
-        */
-       if (!anon)
-               memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
+       if (!PageAnon(page)) {
+               page_remove_file_rmap(page);
+               return;
+       }
 
        /* page still mapped by someone else? */
        if (!atomic_add_negative(-1, &page->_mapcount))
-               goto out;
+               return;
+
+       /* Hugepages are not counted in NR_ANON_PAGES for now. */
+       if (unlikely(PageHuge(page)))
+               return;
 
        /*
-        * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
-        * and not charged by memcg for now.
-        *
         * We use the irq-unsafe __{inc|mod}_zone_page_stat because
         * these counters are not modified in interrupt context, and
-        * these counters are not modified in interrupt context, and
         * pte lock(a spinlock) is held, which implies preemption disabled.
         */
-       if (unlikely(PageHuge(page)))
-               goto out;
-       if (anon) {
-               if (PageTransHuge(page))
-                       __dec_zone_page_state(page,
-                                             NR_ANON_TRANSPARENT_HUGEPAGES);
-               __mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
-                               -hpage_nr_pages(page));
-       } else {
-               __dec_zone_page_state(page, NR_FILE_MAPPED);
-               mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
-       }
+       if (PageTransHuge(page))
+               __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+
+       __mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
+                             -hpage_nr_pages(page));
+
        if (unlikely(PageMlocked(page)))
                clear_page_mlock(page);
+
        /*
         * It would be tidy to reset the PageAnon mapping here,
         * but that might overwrite a racing page_add_anon_rmap
@@ -1111,9 +1128,6 @@ void page_remove_rmap(struct page *page)
         * Leaving it set also helps swapoff to reinstate ptes
         * faster for those pages still in swapcache.
         */
-out:
-       if (!anon)
-               mem_cgroup_end_page_stat(memcg, locked, flags);
 }
 
 /*