diff --git a/mm/nommu.c b/mm/nommu.c
index bd1808e..b51eadf 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -722,11 +722,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
        if (vma->vm_file) {
                mapping = vma->vm_file->f_mapping;
 
-               mutex_lock(&mapping->i_mmap_mutex);
+               i_mmap_lock_write(mapping);
                flush_dcache_mmap_lock(mapping);
                vma_interval_tree_insert(vma, &mapping->i_mmap);
                flush_dcache_mmap_unlock(mapping);
-               mutex_unlock(&mapping->i_mmap_mutex);
+               i_mmap_unlock_write(mapping);
        }
 
        /* add the VMA to the tree */
@@ -795,11 +795,11 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
        if (vma->vm_file) {
                mapping = vma->vm_file->f_mapping;
 
-               mutex_lock(&mapping->i_mmap_mutex);
+               i_mmap_lock_write(mapping);
                flush_dcache_mmap_lock(mapping);
                vma_interval_tree_remove(vma, &mapping->i_mmap);
                flush_dcache_mmap_unlock(mapping);
-               mutex_unlock(&mapping->i_mmap_mutex);
+               i_mmap_unlock_write(mapping);
        }
 
        /* remove from the MM's tree and list */
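[Both hunks above replace raw mutex operations on i_mmap_mutex with the
wrapper helpers introduced when that lock was converted to a read/write
semaphore. For reference, the upstream helpers in include/linux/fs.h are
thin wrappers around the new rwsem; a paraphrased sketch follows (the
field name i_mmap_rwsem comes from the upstream conversion, not from this
diff):

static inline void i_mmap_lock_write(struct address_space *mapping)
{
	/* exclusive side: guards inserts/removals on the i_mmap tree */
	down_write(&mapping->i_mmap_rwsem);
}

static inline void i_mmap_unlock_write(struct address_space *mapping)
{
	up_write(&mapping->i_mmap_rwsem);
}

static inline void i_mmap_lock_read(struct address_space *mapping)
{
	/* shared side: tree walks only, as in the shrink hunks below */
	down_read(&mapping->i_mmap_rwsem);
}

static inline void i_mmap_unlock_read(struct address_space *mapping)
{
	up_read(&mapping->i_mmap_rwsem);
}]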
@@ -1149,8 +1149,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
                           unsigned long len,
                           unsigned long capabilities)
 {
-       struct page *pages;
-       unsigned long total, point, n;
+       unsigned long total, point;
        void *base;
        int ret, order;
 
@@ -1182,33 +1181,23 @@ static int do_mmap_private(struct vm_area_struct *vma,
        order = get_order(len);
        kdebug("alloc order %d for %lx", order, len);
 
-       pages = alloc_pages(GFP_KERNEL, order);
-       if (!pages)
-               goto enomem;
-
        total = 1 << order;
-       atomic_long_add(total, &mmap_pages_allocated);
-
        point = len >> PAGE_SHIFT;
 
-       /* we allocated a power-of-2 sized page set, so we may want to trim off
-        * the excess */
+       /* we don't want to allocate a power-of-2 sized page set */
        if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
-               while (total > point) {
-                       order = ilog2(total - point);
-                       n = 1 << order;
-                       kdebug("shave %lu/%lu @%lu", n, total - point, total);
-                       atomic_long_sub(n, &mmap_pages_allocated);
-                       total -= n;
-                       set_page_refcounted(pages + total);
-                       __free_pages(pages + total, order);
-               }
+               total = point;
+               kdebug("try to alloc exact %lu pages", total);
+               base = alloc_pages_exact(len, GFP_KERNEL);
+       } else {
+               base = (void *)__get_free_pages(GFP_KERNEL, order);
        }
 
-       for (point = 1; point < total; point++)
-               set_page_refcounted(&pages[point]);
+       if (!base)
+               goto enomem;
+
+       atomic_long_add(total, &mmap_pages_allocated);
 
-       base = page_address(pages);
        region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
        region->vm_start = (unsigned long) base;
        region->vm_end   = region->vm_start + len;
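[The rewritten path above drops the hand-rolled trim loop: when trimming is
enabled and rounding len up to a power-of-two page count would waste at
least sysctl_nr_trim_pages pages, it sets total = point (the exact page
count) and lets alloc_pages_exact() return just len bytes worth of pages,
so mmap_pages_allocated is bumped only by what is actually kept; otherwise
the power-of-two __get_free_pages() path is used as before. Upstream,
alloc_pages_exact() performs essentially the same round-up-then-trim
internally; a paraphrased sketch of the mm/page_alloc.c logic (not
verbatim):

void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
{
	unsigned int order = get_order(size);
	unsigned long addr = __get_free_pages(gfp_mask, order);

	if (addr) {
		unsigned long alloc_end = addr + (PAGE_SIZE << order);
		unsigned long used = addr + PAGE_ALIGN(size);

		/* break the high-order block into order-0 pages, then
		 * hand the tail beyond the requested size back to the
		 * buddy allocator */
		split_page(virt_to_page((void *)addr), order);
		while (used < alloc_end) {
			free_page(used);
			used += PAGE_SIZE;
		}
	}
	return (void *)addr;
}

For example, a 5-page request gets order = 3 (8 pages) from
__get_free_pages(); the 3 trailing pages are freed again, and the caller
accounts 5 pages rather than 8.]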
@@ -2094,14 +2083,14 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
        high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
        down_write(&nommu_region_sem);
-       mutex_lock(&inode->i_mapping->i_mmap_mutex);
+       i_mmap_lock_read(inode->i_mapping);
 
        /* search for VMAs that fall within the dead zone */
        vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
                /* found one - only interested if it's shared out of the page
                 * cache */
                if (vma->vm_flags & VM_SHARED) {
-                       mutex_unlock(&inode->i_mapping->i_mmap_mutex);
+                       i_mmap_unlock_read(inode->i_mapping);
                        up_write(&nommu_region_sem);
                        return -ETXTBSY; /* not quite true, but near enough */
                }
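[On the shrink side, the conversion takes the i_mmap lock in read mode:
these loops only walk the interval tree, and nommu_region_sem is already
held for writing across the whole operation. A worked example of the
dead-zone bounds (illustrative, assuming 4KiB pages; low is computed from
the function's newsize parameter just above the hunk shown):

/* Truncating a 20480-byte (5-page) file to newsize = 6000:
 *
 *	low  = newsize >> PAGE_SHIFT                  = 6000  >> 12 = 1
 *	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT   = 24575 >> 12 = 5
 *
 * so vma_interval_tree_foreach() is handed the page-index range
 * starting at 1, and any VM_SHARED mapping overlapping it makes the
 * shrink fail with -ETXTBSY.
 */]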
@@ -2113,8 +2102,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
         * we don't check for any regions that start beyond the EOF as there
         * shouldn't be any
         */
-       vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap,
-                                 0, ULONG_MAX) {
+       vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
                if (!(vma->vm_flags & VM_SHARED))
                        continue;
 
@@ -2129,7 +2117,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
                }
        }
 
-       mutex_unlock(&inode->i_mapping->i_mmap_mutex);
+       i_mmap_unlock_read(inode->i_mapping);
        up_write(&nommu_region_sem);
        return 0;
 }