diff --git a/mm/mmap.c b/mm/mmap.c
index ae91989..7b36aa7 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -232,7 +232,7 @@ error:
 }
 
 /*
- * Requires inode->i_mapping->i_mmap_mutex
+ * Requires inode->i_mapping->i_mmap_rwsem
  */
 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
                struct file *file, struct address_space *mapping)
@@ -260,9 +260,9 @@ void unlink_file_vma(struct vm_area_struct *vma)
 
        if (file) {
                struct address_space *mapping = file->f_mapping;
-               mutex_lock(&mapping->i_mmap_mutex);
+               i_mmap_lock_write(mapping);
                __remove_shared_vm_struct(vma, file, mapping);
-               mutex_unlock(&mapping->i_mmap_mutex);
+               i_mmap_unlock_write(mapping);
        }
 }
 
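Review note: the open-coded mutex calls above are replaced by the new
i_mmap_lock_write()/i_mmap_unlock_write() helpers. A sketch of what this
hunk assumes they look like, namely thin inline wrappers beside the
i_mmap_rwsem field (presumably in include/linux/fs.h):

	static inline void i_mmap_lock_write(struct address_space *mapping)
	{
		down_write(&mapping->i_mmap_rwsem);
	}

	static inline void i_mmap_unlock_write(struct address_space *mapping)
	{
		up_write(&mapping->i_mmap_rwsem);
	}
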
@@ -674,14 +674,14 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
 
        if (vma->vm_file) {
                mapping = vma->vm_file->f_mapping;
-               mutex_lock(&mapping->i_mmap_mutex);
+               i_mmap_lock_write(mapping);
        }
 
        __vma_link(mm, vma, prev, rb_link, rb_parent);
        __vma_link_file(vma);
 
        if (mapping)
-               mutex_unlock(&mapping->i_mmap_mutex);
+               i_mmap_unlock_write(mapping);
 
        mm->map_count++;
        validate_mm(mm);
@@ -796,7 +796,7 @@ again:                      remove_next = 1 + (end > next->vm_end);
                                                        next->vm_end);
                }
 
-               mutex_lock(&mapping->i_mmap_mutex);
+               i_mmap_lock_write(mapping);
                if (insert) {
                        /*
                         * Put into interval tree now, so instantiated pages
@@ -883,7 +883,7 @@ again:                      remove_next = 1 + (end > next->vm_end);
                anon_vma_unlock_write(anon_vma);
        }
        if (mapping)
-               mutex_unlock(&mapping->i_mmap_mutex);
+               i_mmap_unlock_write(mapping);
 
        if (root) {
                uprobe_mmap(vma);
@@ -2362,6 +2362,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
 }
 #endif
 
+EXPORT_SYMBOL_GPL(find_extend_vma);
+
 /*
  * Ok - we have the memory areas we should free on the vma list,
  * so release them, and do the vma updates.
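Review note: with EXPORT_SYMBOL_GPL() in place, find_extend_vma() becomes
resolvable from GPL-licensed modules. A hypothetical caller, purely for
illustration (mm and addr are assumed to be in scope, and the lookup has
to run under mmap_sem):

	/* Hypothetical module fragment: find the VMA covering addr,
	 * extending the stack VMA if addr falls just below it. */
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	vma = find_extend_vma(mm, addr);
	if (vma)
		pr_debug("vma %lx-%lx\n", vma->vm_start, vma->vm_end);
	up_read(&mm->mmap_sem);
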
@@ -2601,6 +2603,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
        detach_vmas_to_be_unmapped(mm, vma, prev, end);
        unmap_region(mm, vma, prev, start, end);
 
+       arch_unmap(mm, vma, start, end);
+
        /* Fix up all other VM information */
        remove_vma_list(mm, vma);
 
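Review note: arch_unmap() gives an architecture a hook at munmap time
(x86 uses it to tear down stale MPX bounds tables). Other architectures
get a no-op; a sketch of the generic stub, assuming the usual asm-generic
mm_hooks layout:

	/* Generic no-op; architectures that need munmap notification
	 * override this (include/asm-generic/mm_hooks.h). */
	static inline void arch_unmap(struct mm_struct *mm,
				      struct vm_area_struct *vma,
				      unsigned long start, unsigned long end)
	{
	}
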
@@ -2789,7 +2793,7 @@ void exit_mmap(struct mm_struct *mm)
 
 /* Insert vm structure into process list sorted by address
  * and into the inode's i_mmap tree.  If vm_file is non-NULL
- * then i_mmap_mutex is taken here.
+ * then i_mmap_rwsem is taken here.
  */
 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
 {
@@ -3084,7 +3088,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
                 */
                if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
                        BUG();
-               mutex_lock_nest_lock(&mapping->i_mmap_mutex, &mm->mmap_sem);
+               down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_sem);
        }
 }
 
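Review note: down_write_nest_lock() is the rwsem counterpart of the
mutex_lock_nest_lock() it replaces: it tells lockdep that each
i_mmap_rwsem taken here nests under mm->mmap_sem, so acquiring many of
them in a row is not reported as recursive locking. From my reading of
include/linux/rwsem.h, under CONFIG_DEBUG_LOCK_ALLOC the annotation
boils down to roughly:

	# define down_write_nest_lock(sem, nest_lock)				\
	do {									\
		typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
		_down_write_nest_lock(sem, &(nest_lock)->dep_map);	\
	} while (0)
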
@@ -3111,7 +3115,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
  * vma in this mm is backed by the same anon_vma or address_space.
  *
  * We can take all the locks in random order because the VM code
- * taking i_mmap_mutex or anon_vma->rwsem outside the mmap_sem never
+ * taking i_mmap_rwsem or anon_vma->rwsem outside the mmap_sem never
  * takes more than one of them in a row. Secondly we're protected
  * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
  *
@@ -3180,7 +3184,7 @@ static void vm_unlock_mapping(struct address_space *mapping)
                 * AS_MM_ALL_LOCKS can't change to 0 from under us
                 * because we hold the mm_all_locks_mutex.
                 */
-               mutex_unlock(&mapping->i_mmap_mutex);
+               i_mmap_unlock_write(mapping);
                if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
                                        &mapping->flags))
                        BUG();