mm: memcontrol: rewrite charge API
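
The memcontrol part of this diff replaces the old one-shot mem_cgroup_charge_file() with a split charge API: mem_cgroup_try_charge() reserves the charge up front, and the caller then either commits it with mem_cgroup_commit_charge() once the page has been added to the page cache, or releases it with mem_cgroup_cancel_charge() if insertion fails. As part of the same conversion, shmem_unuse()/shmem_unuse_inode() switch from a separate "found" flag to plain errno values (-EAGAIN meaning "nothing found in this inode"), so the pending charge can be cancelled on every failure path. A minimal sketch of the caller pattern, condensed from the shmem_getpage_gfp() allocation hunk below (the real code unwinds through its decused label instead of returning directly):

	struct mem_cgroup *memcg;
	int error;

	/* Reserve the charge before the page becomes visible anywhere. */
	error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg);
	if (error)
		return error;

	error = shmem_add_to_page_cache(page, mapping, index, NULL);
	if (error) {
		/* Insertion failed: drop the reservation again. */
		mem_cgroup_cancel_charge(page, memcg);
		return error;
	}

	/*
	 * Success: commit the charge.  The last argument tells memcg
	 * whether the page may already be on the LRU (true in the
	 * swap-in paths below, false for a freshly allocated page).
	 */
	mem_cgroup_commit_charge(page, memcg, false);
	lru_cache_add_anon(page);
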
diff --git a/mm/shmem.c b/mm/shmem.c
index af68b15..1f1a808 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -149,6 +149,19 @@ static inline void shmem_unacct_size(unsigned long flags, loff_t size)
                vm_unacct_memory(VM_ACCT(size));
 }
 
+static inline int shmem_reacct_size(unsigned long flags,
+               loff_t oldsize, loff_t newsize)
+{
+       if (!(flags & VM_NORESERVE)) {
+               if (VM_ACCT(newsize) > VM_ACCT(oldsize))
+                       return security_vm_enough_memory_mm(current->mm,
+                                       VM_ACCT(newsize) - VM_ACCT(oldsize));
+               else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
+                       vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
+       }
+       return 0;
+}
+
 /*
  * ... whereas tmpfs objects are accounted incrementally as
  * pages are allocated, in order to allow huge sparse files.
@@ -280,7 +293,7 @@ static bool shmem_confirm_swap(struct address_space *mapping,
  */
 static int shmem_add_to_page_cache(struct page *page,
                                   struct address_space *mapping,
-                                  pgoff_t index, gfp_t gfp, void *expected)
+                                  pgoff_t index, void *expected)
 {
        int error;
 
@@ -549,6 +562,10 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
                loff_t newsize = attr->ia_size;
 
                if (newsize != oldsize) {
+                       error = shmem_reacct_size(SHMEM_I(inode)->flags,
+                                       oldsize, newsize);
+                       if (error)
+                               return error;
                        i_size_write(inode, newsize);
                        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
                }
@@ -604,7 +621,7 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
        radswap = swp_to_radix_entry(swap);
        index = radix_tree_locate_item(&mapping->page_tree, radswap);
        if (index == -1)
-               return 0;
+               return -EAGAIN; /* tell shmem_unuse we found nothing */
 
        /*
         * Move _head_ to start search for next from here.
@@ -649,7 +666,7 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
         */
        if (!error)
                error = shmem_add_to_page_cache(*pagep, mapping, index,
-                                               GFP_NOWAIT, radswap);
+                                               radswap);
        if (error != -ENOMEM) {
                /*
                 * Truncation and eviction use free_swap_and_cache(), which
@@ -663,7 +680,6 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
                        spin_unlock(&info->lock);
                        swap_free(swap);
                }
-               error = 1;      /* not an error, but entry was found */
        }
        return error;
 }
@@ -675,7 +691,7 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
 {
        struct list_head *this, *next;
        struct shmem_inode_info *info;
-       int found = 0;
+       struct mem_cgroup *memcg;
        int error = 0;
 
        /*
@@ -690,26 +706,32 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
         * the shmem_swaplist_mutex which might hold up shmem_writepage().
         * Charged back to the user (not to caller) when swap account is used.
         */
-       error = mem_cgroup_charge_file(page, current->mm, GFP_KERNEL);
+       error = mem_cgroup_try_charge(page, current->mm, GFP_KERNEL, &memcg);
        if (error)
                goto out;
        /* No radix_tree_preload: swap entry keeps a place for page in tree */
+       error = -EAGAIN;
 
        mutex_lock(&shmem_swaplist_mutex);
        list_for_each_safe(this, next, &shmem_swaplist) {
                info = list_entry(this, struct shmem_inode_info, swaplist);
                if (info->swapped)
-                       found = shmem_unuse_inode(info, swap, &page);
+                       error = shmem_unuse_inode(info, swap, &page);
                else
                        list_del_init(&info->swaplist);
                cond_resched();
-               if (found)
+               if (error != -EAGAIN)
                        break;
+               /* found nothing in this: move on to search the next */
        }
        mutex_unlock(&shmem_swaplist_mutex);
 
-       if (found < 0)
-               error = found;
+       if (error) {
+               if (error != -ENOMEM)
+                       error = 0;
+               mem_cgroup_cancel_charge(page, memcg);
+       } else
+               mem_cgroup_commit_charge(page, memcg, true);
 out:
        unlock_page(page);
        page_cache_release(page);
@@ -1013,6 +1035,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info;
        struct shmem_sb_info *sbinfo;
+       struct mem_cgroup *memcg;
        struct page *page;
        swp_entry_t swap;
        int error;
@@ -1091,11 +1114,10 @@ repeat:
                                goto failed;
                }
 
-               error = mem_cgroup_charge_file(page, current->mm,
-                                               gfp & GFP_RECLAIM_MASK);
+               error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg);
                if (!error) {
                        error = shmem_add_to_page_cache(page, mapping, index,
-                                               gfp, swp_to_radix_entry(swap));
+                                               swp_to_radix_entry(swap));
                        /*
                         * We already confirmed swap under page lock, and make
                         * no memory allocation here, so usually no possibility
@@ -1108,12 +1130,16 @@ repeat:
                         * Reset swap.val? No, leave it so "failed" goes back to
                         * "repeat": reading a hole and writing should succeed.
                         */
-                       if (error)
+                       if (error) {
+                               mem_cgroup_cancel_charge(page, memcg);
                                delete_from_swap_cache(page);
+                       }
                }
                if (error)
                        goto failed;
 
+               mem_cgroup_commit_charge(page, memcg, true);
+
                spin_lock(&info->lock);
                info->swapped--;
                shmem_recalc_inode(inode);
@@ -1149,22 +1175,22 @@ repeat:
                __SetPageSwapBacked(page);
                __set_page_locked(page);
                if (sgp == SGP_WRITE)
-                       init_page_accessed(page);
+                       __SetPageReferenced(page);
 
-               error = mem_cgroup_charge_file(page, current->mm,
-                                               gfp & GFP_RECLAIM_MASK);
+               error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg);
                if (error)
                        goto decused;
                error = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
                if (!error) {
                        error = shmem_add_to_page_cache(page, mapping, index,
-                                                       gfp, NULL);
+                                                       NULL);
                        radix_tree_preload_end();
                }
                if (error) {
-                       mem_cgroup_uncharge_cache_page(page);
+                       mem_cgroup_cancel_charge(page, memcg);
                        goto decused;
                }
+               mem_cgroup_commit_charge(page, memcg, false);
                lru_cache_add_anon(page);
 
                spin_lock(&info->lock);
@@ -2932,16 +2958,16 @@ static struct file *__shmem_file_setup(const char *name, loff_t size,
        this.len = strlen(name);
        this.hash = 0; /* will go */
        sb = shm_mnt->mnt_sb;
+       path.mnt = mntget(shm_mnt);
        path.dentry = d_alloc_pseudo(sb, &this);
        if (!path.dentry)
                goto put_memory;
        d_set_d_op(path.dentry, &anon_ops);
-       path.mnt = mntget(shm_mnt);
 
        res = ERR_PTR(-ENOSPC);
        inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
        if (!inode)
-               goto put_dentry;
+               goto put_memory;
 
        inode->i_flags |= i_flags;
        d_instantiate(path.dentry, inode);
@@ -2949,19 +2975,19 @@ static struct file *__shmem_file_setup(const char *name, loff_t size,
        clear_nlink(inode);     /* It is unlinked */
        res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
        if (IS_ERR(res))
-               goto put_dentry;
+               goto put_path;
 
        res = alloc_file(&path, FMODE_WRITE | FMODE_READ,
                  &shmem_file_operations);
        if (IS_ERR(res))
-               goto put_dentry;
+               goto put_path;
 
        return res;
 
-put_dentry:
-       path_put(&path);
 put_memory:
        shmem_unacct_size(flags, size);
+put_path:
+       path_put(&path);
        return res;
 }
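
The last two hunks rearrange the __shmem_file_setup() cleanup labels: path.mnt is taken before d_alloc_pseudo(), and put_memory now falls through into put_path, so each failure point releases exactly what has been acquired so far. Once the inode has been instantiated on the dentry (with i_size set and nlink cleared), failures jump to put_path only; the final path_put() then evicts the inode, and shmem's eviction path unaccounts the size, so calling shmem_unacct_size() there as well would unaccount it twice. A condensed sketch of the resulting unwind order (names as in the hunks above; unrelated lines elided):

	path.mnt = mntget(shm_mnt);	/* taken before the dentry ... */
	path.dentry = d_alloc_pseudo(sb, &this);
	if (!path.dentry)
		goto put_memory;	/* ... so path_put() below is always safe */
	...
	d_instantiate(path.dentry, inode);
	/* From here on, the last dput() evicts the inode and unaccounts the size. */
	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
	if (IS_ERR(res))
		goto put_path;		/* skip the explicit shmem_unacct_size() */
	...
	return res;

put_memory:
	shmem_unacct_size(flags, size);
put_path:
	path_put(&path);		/* dput(NULL) is a no-op; mntput() always runs */
	return res;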