Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
diff --git a/mm/memory.c b/mm/memory.c
index d3cb2ef..c3b9097 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -220,9 +220,6 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
        /* Is it from 0 to ~0? */
        tlb->fullmm     = !(start | (end+1));
        tlb->need_flush_all = 0;
-       tlb->start      = start;
-       tlb->end        = end;
-       tlb->need_flush = 0;
        tlb->local.next = NULL;
        tlb->local.nr   = 0;
        tlb->local.max  = ARRAY_SIZE(tlb->__pages);
@@ -232,16 +229,21 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
        tlb->batch = NULL;
 #endif
+
+       __tlb_reset_range(tlb);
 }
 
 static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
-       tlb->need_flush = 0;
+       if (!tlb->end)
+               return;
+
        tlb_flush(tlb);
        mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
        tlb_table_flush(tlb);
 #endif
+       __tlb_reset_range(tlb);
 }
 
 static void tlb_flush_mmu_free(struct mmu_gather *tlb)
@@ -257,8 +259,6 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 
 void tlb_flush_mmu(struct mmu_gather *tlb)
 {
-       if (!tlb->need_flush)
-               return;
        tlb_flush_mmu_tlbonly(tlb);
        tlb_flush_mmu_free(tlb);
 }
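
The hunks above drop the explicit need_flush flag: a non-zero tlb->end now means a flush is pending, and __tlb_reset_range() (added on the asm-generic tlb.h side of this series, not shown in this file) clears the tracked range again once the flush is done. A minimal sketch of that helper, assuming the generic mmu_gather range fields used above:

    static inline void __tlb_reset_range(struct mmu_gather *tlb)
    {
            /* Empty range: tlb_flush_mmu_tlbonly() bails out while end == 0. */
            tlb->start = TASK_SIZE;
            tlb->end   = 0;
    }
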
@@ -293,7 +293,7 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
        struct mmu_gather_batch *batch;
 
-       VM_BUG_ON(!tlb->need_flush);
+       VM_BUG_ON(!tlb->end);
 
        batch = tlb->active;
        batch->pages[batch->nr++] = page;
@@ -360,8 +360,6 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 {
        struct mmu_table_batch **batch = &tlb->batch;
 
-       tlb->need_flush = 1;
-
        /*
         * When there's less than two users of this mm there cannot be a
         * concurrent page-table walk.
@@ -1187,20 +1185,8 @@ again:
        arch_leave_lazy_mmu_mode();
 
        /* Do the actual TLB flush before dropping ptl */
-       if (force_flush) {
-               unsigned long old_end;
-
-               /*
-                * Flush the TLB just for the previous segment,
-                * then update the range to be the remaining
-                * TLB range.
-                */
-               old_end = tlb->end;
-               tlb->end = addr;
+       if (force_flush)
                tlb_flush_mmu_tlbonly(tlb);
-               tlb->start = addr;
-               tlb->end = old_end;
-       }
        pte_unmap_unlock(start_pte, ptl);
 
        /*
@@ -1341,9 +1327,9 @@ static void unmap_single_vma(struct mmu_gather *tlb,
                         * safe to do nothing in this case.
                         */
                        if (vma->vm_file) {
-                               mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
+                               i_mmap_lock_write(vma->vm_file->f_mapping);
                                __unmap_hugepage_range_final(tlb, vma, start, end, NULL);
-                               mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+                               i_mmap_unlock_write(vma->vm_file->f_mapping);
                        }
                } else
                        unmap_page_range(tlb, vma, start, end, details);
@@ -2392,12 +2378,12 @@ void unmap_mapping_range(struct address_space *mapping,
                details.last_index = ULONG_MAX;
 
 
-       mutex_lock(&mapping->i_mmap_mutex);
+       i_mmap_lock_read(mapping);
        if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
                unmap_mapping_range_tree(&mapping->i_mmap, &details);
        if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
                unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
-       mutex_unlock(&mapping->i_mmap_mutex);
+       i_mmap_unlock_read(mapping);
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
@@ -2642,7 +2628,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                return VM_FAULT_SIGBUS;
 
        /* Use the zero-page for reads */
-       if (!(flags & FAULT_FLAG_WRITE)) {
+       if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
                entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
                                                vma->vm_page_prot));
                page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
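
mm_forbids_zeropage() is new here and is not defined in mm/memory.c; assuming the generic fallback from include/linux/mm.h that accompanies this change, it defaults to allowing the shared zero page and lets an architecture override it when it cannot map that page into user space:

    /* Generic fallback (assumed): architectures that cannot use the
     * shared zero page for read faults define their own version. */
    #ifndef mm_forbids_zeropage
    #define mm_forbids_zeropage(X)  (0)
    #endif
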
@@ -3380,6 +3366,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(handle_mm_fault);
 
 #ifndef __PAGETABLE_PUD_FOLDED
 /*