mm: simplify lock_page_memcg()
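
lock_page_memcg() no longer looks up and returns the page's struct mem_cgroup. Callers take and release the lock on the page itself, and the memcg stat helpers (mem_cgroup_inc_page_stat() / mem_cgroup_dec_page_stat()) now take the page rather than the memcg, resolving page->mem_cgroup internally. That drops the local memcg variable and the lookup from every call site, as the hunks below show for page_add_file_rmap() and page_remove_file_rmap().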
index 2871e7d..02f0bfc 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1287,21 +1287,17 @@ void page_add_new_anon_rmap(struct page *page,
  */
 void page_add_file_rmap(struct page *page)
 {
-       struct mem_cgroup *memcg;
-
-       memcg = lock_page_memcg(page);
+       lock_page_memcg(page);
        if (atomic_inc_and_test(&page->_mapcount)) {
                __inc_zone_page_state(page, NR_FILE_MAPPED);
-               mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
+               mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
        }
-       unlock_page_memcg(memcg);
+       unlock_page_memcg(page);
 }
 
 static void page_remove_file_rmap(struct page *page)
 {
-       struct mem_cgroup *memcg;
-
-       memcg = lock_page_memcg(page);
+       lock_page_memcg(page);
 
        /* Hugepages are not counted in NR_FILE_MAPPED for now. */
        if (unlikely(PageHuge(page))) {
@@ -1320,12 +1316,12 @@ static void page_remove_file_rmap(struct page *page)
         * pte lock(a spinlock) is held, which implies preemption disabled.
         */
        __dec_zone_page_state(page, NR_FILE_MAPPED);
-       mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
+       mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
 
        if (unlikely(PageMlocked(page)))
                clear_page_mlock(page);
 out:
-       unlock_page_memcg(memcg);
+       unlock_page_memcg(page);
 }
 
 static void page_remove_anon_compound_rmap(struct page *page)
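
The same simplification applies at every site that takes the lock. The sketch below (a hypothetical helper, not part of the patch; it only restates the pattern from the hunks above) contrasts the two calling conventions:

	/* Old convention: the lock returned the memcg, which the caller
	 * then had to thread through the stat and unlock helpers. */
	static void file_mapped_old(struct page *page)
	{
		struct mem_cgroup *memcg;

		memcg = lock_page_memcg(page);
		if (atomic_inc_and_test(&page->_mapcount)) {
			__inc_zone_page_state(page, NR_FILE_MAPPED);
			mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
		}
		unlock_page_memcg(memcg);
	}

	/* New convention: the page is the only handle the caller needs;
	 * the memcg lookup happens inside the helpers. */
	static void file_mapped_new(struct page *page)
	{
		lock_page_memcg(page);
		if (atomic_inc_and_test(&page->_mapcount)) {
			__inc_zone_page_state(page, NR_FILE_MAPPED);
			mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
		}
		unlock_page_memcg(page);
	}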