index 701b93f..709bc83 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -27,7 +27,7 @@
  *         mapping->i_mmap_rwsem
  *           anon_vma->rwsem
  *             mm->page_table_lock or pte_lock
- *               zone->lru_lock (in mark_page_accessed, isolate_lru_page)
+ *               zone_lru_lock (in mark_page_accessed, isolate_lru_page)
  *               swap_lock (in swap_duplicate, swap_info_get)
  *                 mmlist_lock (in mmput, drain_mmlist and others)
  *                 mapping->private_lock (in __set_page_dirty_buffers)
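Side note: with this series the LRU lock itself moves from the zone into the node, so the renamed zone_lru_lock above is a small accessor rather than a zone field. A hedged sketch of how that helper is expected to look (per the node-LRU rework in include/linux/mmzone.h; treat it as illustrative):

static inline spinlock_t *zone_lru_lock(struct zone *zone)
{
        /* the lock now lives in the zone's owning node (pglist_data) */
        return &zone->zone_pgdat->lru_lock;
}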
@@ -1212,11 +1212,9 @@ void do_page_add_anon_rmap(struct page *page,
                 * pte lock(a spinlock) is held, which implies preemption
                 * disabled.
                 */
-               if (compound) {
-                       __inc_zone_page_state(page,
-                                             NR_ANON_TRANSPARENT_HUGEPAGES);
-               }
-               __mod_zone_page_state(page_zone(page), NR_ANON_PAGES, nr);
+               if (compound)
+                       __inc_node_page_state(page, NR_ANON_THPS);
+               __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
        }
        if (unlikely(PageKsm(page)))
                return;
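The hunk above is the core of the change: mapped-page counters stop being per-zone (page_zone()/__mod_zone_page_state()) and become per-node (page_pgdat()/__mod_node_page_state()), with NR_ANON_PAGES/NR_ANON_TRANSPARENT_HUGEPAGES renamed to NR_ANON_MAPPED/NR_ANON_THPS. For reference, page_pgdat() is expected to be the usual node lookup; a minimal sketch (assuming the include/linux/mm.h helper from this series):

static inline struct pglist_data *page_pgdat(const struct page *page)
{
        /* resolve the page's NUMA node and return its pglist_data */
        return NODE_DATA(page_to_nid(page));
}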
@@ -1253,14 +1251,14 @@ void page_add_new_anon_rmap(struct page *page,
                VM_BUG_ON_PAGE(!PageTransHuge(page), page);
                /* increment count (starts at -1) */
                atomic_set(compound_mapcount_ptr(page), 0);
-               __inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+               __inc_node_page_state(page, NR_ANON_THPS);
        } else {
                /* Anon THP always mapped first with PMD */
                VM_BUG_ON_PAGE(PageTransCompound(page), page);
                /* increment count (starts at -1) */
                atomic_set(&page->_mapcount, 0);
        }
-       __mod_zone_page_state(page_zone(page), NR_ANON_PAGES, nr);
+       __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
        __page_set_anon_rmap(page, vma, address, 1);
 }
 
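Both anon paths above use the split THP mapcount scheme: PMD mappings are counted once in the compound mapcount, while each 4k subpage keeps its own page->_mapcount for PTE mappings; both start at -1, so atomic_inc_and_test() or atomic_set(..., 0) marks the first mapping. A sketch of where the compound counter plausibly lives (per the struct page layout of this kernel generation; illustrative only):

static inline atomic_t *compound_mapcount_ptr(struct page *page)
{
        /* the compound mapcount is carried by the first tail page */
        return &page[1].compound_mapcount;
}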
@@ -1270,18 +1268,42 @@ void page_add_new_anon_rmap(struct page *page,
  *
  * The caller needs to hold the pte lock.
  */
-void page_add_file_rmap(struct page *page)
+void page_add_file_rmap(struct page *page, bool compound)
 {
+       int i, nr = 1;
+
+       VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
        lock_page_memcg(page);
-       if (atomic_inc_and_test(&page->_mapcount)) {
-               __inc_zone_page_state(page, NR_FILE_MAPPED);
-               mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+       if (compound && PageTransHuge(page)) {
+               for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
+                       if (atomic_inc_and_test(&page[i]._mapcount))
+                               nr++;
+               }
+               if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
+                       goto out;
+               VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
+               __inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
+       } else {
+               if (PageTransCompound(page)) {
+                       VM_BUG_ON_PAGE(!PageLocked(page), page);
+                       SetPageDoubleMap(compound_head(page));
+                       if (PageMlocked(page))
+                               clear_page_mlock(compound_head(page));
+               }
+               if (!atomic_inc_and_test(&page->_mapcount))
+                       goto out;
        }
+       __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
+       mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+out:
        unlock_page_memcg(page);
 }
 
-static void page_remove_file_rmap(struct page *page)
+static void page_remove_file_rmap(struct page *page, bool compound)
 {
+       int i, nr = 1;
+
+       VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
        lock_page_memcg(page);
 
        /* Hugepages are not counted in NR_FILE_MAPPED for now. */
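page_add_file_rmap() now takes a 'compound' argument so shmem/file THP can be accounted at either level: a PMD mapping bumps the compound mapcount (and, on first map, NR_SHMEM_PMDMAPPED), while PTE mappings bump the per-subpage _mapcount, with PageDoubleMap flagging a THP that is (or may be) mapped both ways. A hypothetical usage sketch of the new signature (helper name invented for illustration, not actual fault-path code):

static void file_rmap_usage_sketch(struct page *page, bool pmd_mapped)
{
        if (pmd_mapped)
                /* whole THP just mapped with a PMD: head page, compound == true */
                page_add_file_rmap(compound_head(page), true);
        else
                /* 4k PTE mapping (small page or THP subpage): compound == false */
                page_add_file_rmap(page, false);
}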
@@ -1292,15 +1314,26 @@ static void page_remove_file_rmap(struct page *page)
        }
 
        /* page still mapped by someone else? */
-       if (!atomic_add_negative(-1, &page->_mapcount))
-               goto out;
+       if (compound && PageTransHuge(page)) {
+               for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
+                       if (atomic_add_negative(-1, &page[i]._mapcount))
+                               nr++;
+               }
+               if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
+                       goto out;
+               VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
+               __dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
+       } else {
+               if (!atomic_add_negative(-1, &page->_mapcount))
+                       goto out;
+       }
 
        /*
-        * We use the irq-unsafe __{inc|mod}_zone_page_stat because
+        * We use the irq-unsafe __{inc|mod}_zone_page_state because
         * these counters are not modified in interrupt context, and
         * pte lock(a spinlock) is held, which implies preemption disabled.
         */
-       __dec_zone_page_state(page, NR_FILE_MAPPED);
+       __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
        mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
 
        if (unlikely(PageMlocked(page)))
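The comment retained above is what lets both file-rmap paths use the cheaper irq-unsafe __mod_node_page_state(): the pte lock is a spinlock, so preemption is off during the read-modify-write of the per-cpu counter, and these counters are never updated from interrupt context. A rough illustration of the pattern with made-up names (not the real mm/vmstat.c code):

/* Illustrative only: a per-cpu delta folded into a node-wide total. */
struct node_stat_sketch {
        atomic_long_t total;            /* node-wide value */
        s8 __percpu *cpu_diff;          /* small per-cpu delta */
};

static void __node_stat_add_sketch(struct node_stat_sketch *st, long delta)
{
        long d = __this_cpu_read(*st->cpu_diff) + delta;

        if (d > 64 || d < -64) {
                /* fold the per-cpu delta into the shared counter */
                atomic_long_add(d, &st->total);
                d = 0;
        }
        __this_cpu_write(*st->cpu_diff, d);
}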
@@ -1323,7 +1356,7 @@ static void page_remove_anon_compound_rmap(struct page *page)
        if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
                return;
 
-       __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+       __dec_node_page_state(page, NR_ANON_THPS);
 
        if (TestClearPageDoubleMap(page)) {
                /*
@@ -1342,7 +1375,7 @@ static void page_remove_anon_compound_rmap(struct page *page)
                clear_page_mlock(page);
 
        if (nr) {
-               __mod_zone_page_state(page_zone(page), NR_ANON_PAGES, -nr);
+               __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, -nr);
                deferred_split_huge_page(page);
        }
 }
@@ -1356,11 +1389,8 @@ static void page_remove_anon_compound_rmap(struct page *page)
  */
 void page_remove_rmap(struct page *page, bool compound)
 {
-       if (!PageAnon(page)) {
-               VM_BUG_ON_PAGE(compound && !PageHuge(page), page);
-               page_remove_file_rmap(page);
-               return;
-       }
+       if (!PageAnon(page))
+               return page_remove_file_rmap(page, compound);
 
        if (compound)
                return page_remove_anon_compound_rmap(page);
@@ -1374,7 +1404,7 @@ void page_remove_rmap(struct page *page, bool compound)
         * these counters are not modified in interrupt context, and
         * pte lock(a spinlock) is held, which implies preemption disabled.
         */
-       __dec_zone_page_state(page, NR_ANON_PAGES);
+       __dec_node_page_state(page, NR_ANON_MAPPED);
 
        if (unlikely(PageMlocked(page)))
                clear_page_mlock(page);
@@ -1436,8 +1466,14 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
         */
        if (!(flags & TTU_IGNORE_MLOCK)) {
                if (vma->vm_flags & VM_LOCKED) {
-                       /* Holding pte lock, we do *not* need mmap_sem here */
-                       mlock_vma_page(page);
+                       /* PTE-mapped THP are never mlocked */
+                       if (!PageTransCompound(page)) {
+                               /*
+                                * Holding pte lock, we do *not* need
+                                * mmap_sem here
+                                */
+                               mlock_vma_page(page);
+                       }
                        ret = SWAP_MLOCK;
                        goto out_unmap;
                }
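The final hunk stops try_to_unmap_one() from mlocking a THP that is mapped with PTEs: for compound pages mlock is handled at PMD granularity, and a PTE-mapped THP may still be split, so it is left alone here. For reference, PageTransCompound() is expected to reduce to a plain compound-page check when THP is configured in; a sketch of the usual definition (illustrative):

static inline int PageTransCompound(struct page *page)
{
        /*
         * Any compound page counts when CONFIG_TRANSPARENT_HUGEPAGE=y;
         * without THP support this is defined to 0 instead.
         */
        return PageCompound(page);
}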