diff --git a/mm/rmap.c b/mm/rmap.c
index 8a13d9f..709bc83 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -27,7 +27,7 @@
  *         mapping->i_mmap_rwsem
  *           anon_vma->rwsem
  *             mm->page_table_lock or pte_lock
- *               zone->lru_lock (in mark_page_accessed, isolate_lru_page)
+ *               zone_lru_lock (in mark_page_accessed, isolate_lru_page)
  *               swap_lock (in swap_duplicate, swap_info_get)
  *                 mmlist_lock (in mmput, drain_mmlist and others)
  *                 mapping->private_lock (in __set_page_dirty_buffers)
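The lock-ordering comment now names zone_lru_lock instead of dereferencing zone->lru_lock directly: in this series the LRU lock moves to the node (pg_data_t) and zones reach it through an accessor. A minimal sketch of such a helper, assuming lru_lock now lives in struct pglist_data and zone->zone_pgdat points back at the owning node (the authoritative definition is in include/linux/mmzone.h):

/* Sketch of the accessor implied by the comment change; details may
 * differ from the real include/linux/mmzone.h definition. */
static inline spinlock_t *zone_lru_lock(struct zone *zone)
{
	return &zone->zone_pgdat->lru_lock;
}
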
@@ -1213,8 +1213,8 @@ void do_page_add_anon_rmap(struct page *page,
                 * disabled.
                 */
                if (compound)
-                       __inc_zone_page_state(page, NR_ANON_THPS);
-               __mod_zone_page_state(page_zone(page), NR_ANON_PAGES, nr);
+                       __inc_node_page_state(page, NR_ANON_THPS);
+               __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
        }
        if (unlikely(PageKsm(page)))
                return;
@@ -1251,14 +1251,14 @@ void page_add_new_anon_rmap(struct page *page,
                VM_BUG_ON_PAGE(!PageTransHuge(page), page);
                /* increment count (starts at -1) */
                atomic_set(compound_mapcount_ptr(page), 0);
-               __inc_zone_page_state(page, NR_ANON_THPS);
+               __inc_node_page_state(page, NR_ANON_THPS);
        } else {
                /* Anon THP always mapped first with PMD */
                VM_BUG_ON_PAGE(PageTransCompound(page), page);
                /* increment count (starts at -1) */
                atomic_set(&page->_mapcount, 0);
        }
-       __mod_zone_page_state(page_zone(page), NR_ANON_PAGES, nr);
+       __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
        __page_set_anon_rmap(page, vma, address, 1);
 }
 
@@ -1282,7 +1282,7 @@ void page_add_file_rmap(struct page *page, bool compound)
                if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
                        goto out;
                VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
-               __inc_zone_page_state(page, NR_SHMEM_PMDMAPPED);
+               __inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
        } else {
                if (PageTransCompound(page)) {
                        VM_BUG_ON_PAGE(!PageLocked(page), page);
@@ -1293,7 +1293,7 @@ void page_add_file_rmap(struct page *page, bool compound)
                if (!atomic_inc_and_test(&page->_mapcount))
                        goto out;
        }
-       __mod_zone_page_state(page_zone(page), NR_FILE_MAPPED, nr);
+       __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
        mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
 out:
        unlock_page_memcg(page);
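The recurring pattern in the hunks above is page_zone(page)/__mod_zone_page_state() becoming page_pgdat(page)/__mod_node_page_state(), with the anon counter renamed from the per-zone NR_ANON_PAGES to the node-level NR_ANON_MAPPED. A minimal sketch of the node lookup these call sites rely on, assuming the usual NODE_DATA()/page_to_nid() helpers (the real definition lives in the mm headers):

/* Sketch: resolve the NUMA node descriptor (pg_data_t) a page belongs to. */
static inline struct pglist_data *page_pgdat(struct page *page)
{
	return NODE_DATA(page_to_nid(page));
}
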
@@ -1322,18 +1322,18 @@ static void page_remove_file_rmap(struct page *page, bool compound)
                if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
                        goto out;
                VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
-               __dec_zone_page_state(page, NR_SHMEM_PMDMAPPED);
+               __dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
        } else {
                if (!atomic_add_negative(-1, &page->_mapcount))
                        goto out;
        }
 
        /*
-        * We use the irq-unsafe __{inc|mod}_zone_page_stat because
+        * We use the irq-unsafe __{inc|mod}_zone_page_state because
         * these counters are not modified in interrupt context, and
         * pte lock(a spinlock) is held, which implies preemption disabled.
         */
-       __mod_zone_page_state(page_zone(page), NR_FILE_MAPPED, -nr);
+       __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
        mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
 
        if (unlikely(PageMlocked(page)))
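The comment above this NR_FILE_MAPPED update explains why the irq-unsafe __mod variant is enough: these counters are never modified from interrupt context, and the pte lock keeps preemption disabled. A hypothetical, simplified illustration of the distinction (not the kernel's actual vmstat code, which maintains per-CPU differentials):

/* Hypothetical sketch only: the __ variant does a plain read-modify-write
 * and relies on the caller to exclude interrupts/preemption; the plain
 * variant pays for local_irq_save()/restore() itself. */
static inline void __mod_stat(long *counter, long delta)
{
	*counter += delta;		/* non-atomic update */
}

static inline void mod_stat(long *counter, long delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_stat(counter, delta);
	local_irq_restore(flags);
}
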
@@ -1356,7 +1356,7 @@ static void page_remove_anon_compound_rmap(struct page *page)
        if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
                return;
 
-       __dec_zone_page_state(page, NR_ANON_THPS);
+       __dec_node_page_state(page, NR_ANON_THPS);
 
        if (TestClearPageDoubleMap(page)) {
                /*
@@ -1375,7 +1375,7 @@ static void page_remove_anon_compound_rmap(struct page *page)
                clear_page_mlock(page);
 
        if (nr) {
-               __mod_zone_page_state(page_zone(page), NR_ANON_PAGES, -nr);
+               __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, -nr);
                deferred_split_huge_page(page);
        }
 }
@@ -1404,7 +1404,7 @@ void page_remove_rmap(struct page *page, bool compound)
         * these counters are not modified in interrupt context, and
         * pte lock(a spinlock) is held, which implies preemption disabled.
         */
-       __dec_zone_page_state(page, NR_ANON_PAGES);
+       __dec_node_page_state(page, NR_ANON_MAPPED);
 
        if (unlikely(PageMlocked(page)))
                clear_page_mlock(page);