mm: percpu: use pr_fmt to prefix output
[cascardo/linux.git] / mm / migrate.c
index 568284e..577c94b 100644 (file)
@@ -349,7 +349,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
                return -EAGAIN;
        }
 
-       if (!page_freeze_refs(page, expected_count)) {
+       if (!page_ref_freeze(page, expected_count)) {
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }
@@ -363,7 +363,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
         */
        if (mode == MIGRATE_ASYNC && head &&
                        !buffer_migrate_lock_buffers(head, mode)) {
-               page_unfreeze_refs(page, expected_count);
+               page_ref_unfreeze(page, expected_count);
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }
@@ -397,7 +397,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
         * to one less reference.
         * We know this isn't the last reference.
         */
-       page_unfreeze_refs(page, expected_count - 1);
+       page_ref_unfreeze(page, expected_count - 1);
 
        spin_unlock(&mapping->tree_lock);
        /* Leave irq disabled to prevent preemption while updating stats */
@@ -451,7 +451,7 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
                return -EAGAIN;
        }
 
-       if (!page_freeze_refs(page, expected_count)) {
+       if (!page_ref_freeze(page, expected_count)) {
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }
@@ -463,7 +463,7 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
 
        radix_tree_replace_slot(pslot, newpage);
 
-       page_unfreeze_refs(page, expected_count - 1);
+       page_ref_unfreeze(page, expected_count - 1);
 
        spin_unlock_irq(&mapping->tree_lock);
 
@@ -1773,7 +1773,10 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
                put_page(new_page);
                goto out_fail;
        }
-
+       /*
+        * We are not sure whether a pending TLB flush here is for a
+        * huge page mapping or not. Hence use the TLB range variant.
+        */
        if (mm_tlb_flush_pending(mm))
                flush_tlb_range(vma, mmun_start, mmun_end);
 
@@ -1829,12 +1832,11 @@ fail_putback:
        page_add_anon_rmap(new_page, vma, mmun_start, true);
        pmdp_huge_clear_flush_notify(vma, mmun_start, pmd);
        set_pmd_at(mm, mmun_start, pmd, entry);
-       flush_tlb_range(vma, mmun_start, mmun_end);
        update_mmu_cache_pmd(vma, address, &entry);
 
        if (page_count(page) != 2) {
                set_pmd_at(mm, mmun_start, pmd, orig_entry);
-               flush_tlb_range(vma, mmun_start, mmun_end);
+               flush_pmd_tlb_range(vma, mmun_start, mmun_end);
                mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
                update_mmu_cache_pmd(vma, address, &entry);
                page_remove_rmap(new_page, true);