Merge tag 'arc-v3.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

diff --git a/mm/shmem.c b/mm/shmem.c
index 8f419cf..a42add1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -66,6 +66,9 @@ static struct vfsmount *shm_mnt;
 #include <linux/highmem.h>
 #include <linux/seq_file.h>
 #include <linux/magic.h>
+#include <linux/syscalls.h>
+#include <linux/fcntl.h>
+#include <uapi/linux/memfd.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -85,7 +88,7 @@ static struct vfsmount *shm_mnt;
  * a time): we would prefer not to enlarge the shmem inode just for that.
  */
 struct shmem_falloc {
-       int     mode;           /* FALLOC_FL mode currently operating */
+       wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
        pgoff_t start;          /* start of range currently being fallocated */
        pgoff_t next;           /* the next page offset to be fallocated */
        pgoff_t nr_falloced;    /* how many new pages have been fallocated */
@@ -149,6 +152,19 @@ static inline void shmem_unacct_size(unsigned long flags, loff_t size)
                vm_unacct_memory(VM_ACCT(size));
 }
 
+static inline int shmem_reacct_size(unsigned long flags,
+               loff_t oldsize, loff_t newsize)
+{
+       if (!(flags & VM_NORESERVE)) {
+               if (VM_ACCT(newsize) > VM_ACCT(oldsize))
+                       return security_vm_enough_memory_mm(current->mm,
+                                       VM_ACCT(newsize) - VM_ACCT(oldsize));
+               else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
+                       vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
+       }
+       return 0;
+}
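
A worked example, not part of the patch (VM_ACCT() rounds a byte count up to whole pages, as defined near the top of this file): growing a file created without VM_NORESERVE from 5000 to 9000 bytes takes VM_ACCT() from 2 pages to 3, so one extra page is charged via security_vm_enough_memory_mm(); shrinking it again unaccts that page, and a resize that stays within the same rounded size (say 5000 to 6000 bytes) changes nothing and returns 0.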
+
 /*
  * ... whereas tmpfs objects are accounted incrementally as
  * pages are allocated, in order to allow huge sparse files.
@@ -280,7 +296,7 @@ static bool shmem_confirm_swap(struct address_space *mapping,
  */
 static int shmem_add_to_page_cache(struct page *page,
                                   struct address_space *mapping,
-                                  pgoff_t index, gfp_t gfp, void *expected)
+                                  pgoff_t index, void *expected)
 {
        int error;
 
@@ -406,7 +422,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                        pvec.pages, indices);
                if (!pvec.nr)
                        break;
-               mem_cgroup_uncharge_start();
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
 
@@ -434,7 +449,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                }
                pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
-               mem_cgroup_uncharge_end();
                cond_resched();
                index++;
        }
@@ -468,24 +482,20 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                return;
 
        index = start;
-       for ( ; ; ) {
+       while (index < end) {
                cond_resched();
 
                pvec.nr = find_get_entries(mapping, index,
                                min(end - index, (pgoff_t)PAGEVEC_SIZE),
                                pvec.pages, indices);
                if (!pvec.nr) {
-                       if (index == start || unfalloc)
+                       /* If all gone or hole-punch or unfalloc, we're done */
+                       if (index == start || end != -1)
                                break;
+                       /* But if truncating, restart to make sure all gone */
                        index = start;
                        continue;
                }
-               if ((index == start || unfalloc) && indices[0] >= end) {
-                       pagevec_remove_exceptionals(&pvec);
-                       pagevec_release(&pvec);
-                       break;
-               }
-               mem_cgroup_uncharge_start();
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
 
@@ -496,8 +506,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                        if (radix_tree_exceptional_entry(page)) {
                                if (unfalloc)
                                        continue;
-                               nr_swaps_freed += !shmem_free_swap(mapping,
-                                                               index, page);
+                               if (shmem_free_swap(mapping, index, page)) {
+                                       /* Swap was replaced by page: retry */
+                                       index--;
+                                       break;
+                               }
+                               nr_swaps_freed++;
                                continue;
                        }
 
@@ -506,13 +520,17 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                                if (page->mapping == mapping) {
                                        VM_BUG_ON_PAGE(PageWriteback(page), page);
                                        truncate_inode_page(mapping, page);
+                               } else {
+                                       /* Page was replaced by swap: retry */
+                                       unlock_page(page);
+                                       index--;
+                                       break;
                                }
                        }
                        unlock_page(page);
                }
                pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
-               mem_cgroup_uncharge_end();
                index++;
        }
 
@@ -532,6 +550,7 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range);
 static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
 {
        struct inode *inode = dentry->d_inode;
+       struct shmem_inode_info *info = SHMEM_I(inode);
        int error;
 
        error = inode_change_ok(inode, attr);
@@ -542,7 +561,16 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
                loff_t oldsize = inode->i_size;
                loff_t newsize = attr->ia_size;
 
+               /* protected by i_mutex */
+               if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
+                   (newsize > oldsize && (info->seals & F_SEAL_GROW)))
+                       return -EPERM;
+
                if (newsize != oldsize) {
+                       error = shmem_reacct_size(SHMEM_I(inode)->flags,
+                                       oldsize, newsize);
+                       if (error)
+                               return error;
                        i_size_write(inode, newsize);
                        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
                }
@@ -598,7 +626,7 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
        radswap = swp_to_radix_entry(swap);
        index = radix_tree_locate_item(&mapping->page_tree, radswap);
        if (index == -1)
-               return 0;
+               return -EAGAIN; /* tell shmem_unuse we found nothing */
 
        /*
         * Move _head_ to start search for next from here.
@@ -643,7 +671,7 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
         */
        if (!error)
                error = shmem_add_to_page_cache(*pagep, mapping, index,
-                                               GFP_NOWAIT, radswap);
+                                               radswap);
        if (error != -ENOMEM) {
                /*
                 * Truncation and eviction use free_swap_and_cache(), which
@@ -657,7 +685,6 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
                        spin_unlock(&info->lock);
                        swap_free(swap);
                }
-               error = 1;      /* not an error, but entry was found */
        }
        return error;
 }
@@ -669,7 +696,7 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
 {
        struct list_head *this, *next;
        struct shmem_inode_info *info;
-       int found = 0;
+       struct mem_cgroup *memcg;
        int error = 0;
 
        /*
@@ -684,26 +711,32 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
         * the shmem_swaplist_mutex which might hold up shmem_writepage().
         * Charged back to the user (not to caller) when swap account is used.
         */
-       error = mem_cgroup_charge_file(page, current->mm, GFP_KERNEL);
+       error = mem_cgroup_try_charge(page, current->mm, GFP_KERNEL, &memcg);
        if (error)
                goto out;
        /* No radix_tree_preload: swap entry keeps a place for page in tree */
+       error = -EAGAIN;
 
        mutex_lock(&shmem_swaplist_mutex);
        list_for_each_safe(this, next, &shmem_swaplist) {
                info = list_entry(this, struct shmem_inode_info, swaplist);
                if (info->swapped)
-                       found = shmem_unuse_inode(info, swap, &page);
+                       error = shmem_unuse_inode(info, swap, &page);
                else
                        list_del_init(&info->swaplist);
                cond_resched();
-               if (found)
+               if (error != -EAGAIN)
                        break;
+               /* found nothing in this: move on to search the next */
        }
        mutex_unlock(&shmem_swaplist_mutex);
 
-       if (found < 0)
-               error = found;
+       if (error) {
+               if (error != -ENOMEM)
+                       error = 0;
+               mem_cgroup_cancel_charge(page, memcg);
+       } else
+               mem_cgroup_commit_charge(page, memcg, true);
 out:
        unlock_page(page);
        page_cache_release(page);
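
Note on the return convention above: shmem_unuse_inode() now reports -EAGAIN for "this inode does not hold the swap entry", so the swaplist walk keeps going while error == -EAGAIN and stops on 0 (entry found and brought back) or a real failure. Everything other than -ENOMEM is then folded back to 0, because "not found" is not an error to the swapoff path, and the speculative memcg charge is cancelled or committed to match.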
@@ -760,7 +793,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
                        spin_lock(&inode->i_lock);
                        shmem_falloc = inode->i_private;
                        if (shmem_falloc &&
-                           !shmem_falloc->mode &&
+                           !shmem_falloc->waitq &&
                            index >= shmem_falloc->start &&
                            index < shmem_falloc->next)
                                shmem_falloc->nr_unswapped++;
@@ -807,7 +840,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
        }
 
        mutex_unlock(&shmem_swaplist_mutex);
-       swapcache_free(swap, NULL);
+       swapcache_free(swap);
 redirty:
        set_page_dirty(page);
        if (wbc->for_reclaim)
@@ -980,7 +1013,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
                 */
                oldpage = newpage;
        } else {
-               mem_cgroup_replace_page_cache(oldpage, newpage);
+               mem_cgroup_migrate(oldpage, newpage, false);
                lru_cache_add_anon(newpage);
                *pagep = newpage;
        }
@@ -1007,6 +1040,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info;
        struct shmem_sb_info *sbinfo;
+       struct mem_cgroup *memcg;
        struct page *page;
        swp_entry_t swap;
        int error;
@@ -1029,6 +1063,9 @@ repeat:
                goto failed;
        }
 
+       if (page && sgp == SGP_WRITE)
+               mark_page_accessed(page);
+
        /* fallocated page? */
        if (page && !PageUptodate(page)) {
                if (sgp != SGP_READ)
@@ -1082,11 +1119,10 @@ repeat:
                                goto failed;
                }
 
-               error = mem_cgroup_charge_file(page, current->mm,
-                                               gfp & GFP_RECLAIM_MASK);
+               error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg);
                if (!error) {
                        error = shmem_add_to_page_cache(page, mapping, index,
-                                               gfp, swp_to_radix_entry(swap));
+                                               swp_to_radix_entry(swap));
                        /*
                         * We already confirmed swap under page lock, and make
                         * no memory allocation here, so usually no possibility
@@ -1099,17 +1135,24 @@ repeat:
                         * Reset swap.val? No, leave it so "failed" goes back to
                         * "repeat": reading a hole and writing should succeed.
                         */
-                       if (error)
+                       if (error) {
+                               mem_cgroup_cancel_charge(page, memcg);
                                delete_from_swap_cache(page);
+                       }
                }
                if (error)
                        goto failed;
 
+               mem_cgroup_commit_charge(page, memcg, true);
+
                spin_lock(&info->lock);
                info->swapped--;
                shmem_recalc_inode(inode);
                spin_unlock(&info->lock);
 
+               if (sgp == SGP_WRITE)
+                       mark_page_accessed(page);
+
                delete_from_swap_cache(page);
                set_page_dirty(page);
                swap_free(swap);
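
For readers unfamiliar with the memcg API used above: charging is a three-step try/commit/cancel protocol. A minimal kernel-style sketch of its shape follows; insert_into_cache() is a purely hypothetical placeholder for whatever puts the page into the page cache.

	struct mem_cgroup *memcg;
	int error;

	error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg);
	if (error)
		return error;			/* nothing has been charged yet */

	error = insert_into_cache(page);	/* hypothetical placeholder */
	if (error) {
		mem_cgroup_cancel_charge(page, memcg);	/* drop the reservation */
		return error;
	}
	/*
	 * lrucare is true when the page may already be on an LRU list
	 * (the swap-in paths above), false for a freshly allocated page.
	 */
	mem_cgroup_commit_charge(page, memcg, lrucare);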
@@ -1136,20 +1179,23 @@ repeat:
 
                __SetPageSwapBacked(page);
                __set_page_locked(page);
-               error = mem_cgroup_charge_file(page, current->mm,
-                                               gfp & GFP_RECLAIM_MASK);
+               if (sgp == SGP_WRITE)
+                       __SetPageReferenced(page);
+
+               error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg);
                if (error)
                        goto decused;
                error = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
                if (!error) {
                        error = shmem_add_to_page_cache(page, mapping, index,
-                                                       gfp, NULL);
+                                                       NULL);
                        radix_tree_preload_end();
                }
                if (error) {
-                       mem_cgroup_uncharge_cache_page(page);
+                       mem_cgroup_cancel_charge(page, memcg);
                        goto decused;
                }
+               mem_cgroup_commit_charge(page, memcg, false);
                lru_cache_add_anon(page);
 
                spin_lock(&info->lock);
@@ -1239,38 +1285,58 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         * Trinity finds that probing a hole which tmpfs is punching can
         * prevent the hole-punch from ever completing: which in turn
         * locks writers out with its hold on i_mutex.  So refrain from
-        * faulting pages into the hole while it's being punched, and
-        * wait on i_mutex to be released if vmf->flags permits.
+        * faulting pages into the hole while it's being punched.  Although
+        * shmem_undo_range() does remove the additions, it may be unable to
+        * keep up, as each new page needs its own unmap_mapping_range() call,
+        * and the i_mmap tree grows ever slower to scan if new vmas are added.
+        *
+        * It does not matter if we sometimes reach this check just before the
+        * hole-punch begins, so that one fault then races with the punch:
+        * we just need to make racing faults a rare case.
+        *
+        * The implementation below would be much simpler if we just used a
+        * standard mutex or completion: but we cannot take i_mutex in fault,
+        * and bloating every shmem inode for this unlikely case would be sad.
         */
        if (unlikely(inode->i_private)) {
                struct shmem_falloc *shmem_falloc;
 
                spin_lock(&inode->i_lock);
                shmem_falloc = inode->i_private;
-               if (!shmem_falloc ||
-                   shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE ||
-                   vmf->pgoff < shmem_falloc->start ||
-                   vmf->pgoff >= shmem_falloc->next)
-                       shmem_falloc = NULL;
-               spin_unlock(&inode->i_lock);
-               /*
-                * i_lock has protected us from taking shmem_falloc seriously
-                * once return from shmem_fallocate() went back up that stack.
-                * i_lock does not serialize with i_mutex at all, but it does
-                * not matter if sometimes we wait unnecessarily, or sometimes
-                * miss out on waiting: we just need to make those cases rare.
-                */
-               if (shmem_falloc) {
+               if (shmem_falloc &&
+                   shmem_falloc->waitq &&
+                   vmf->pgoff >= shmem_falloc->start &&
+                   vmf->pgoff < shmem_falloc->next) {
+                       wait_queue_head_t *shmem_falloc_waitq;
+                       DEFINE_WAIT(shmem_fault_wait);
+
+                       ret = VM_FAULT_NOPAGE;
                        if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
                           !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+                               /* It's polite to up mmap_sem if we can */
                                up_read(&vma->vm_mm->mmap_sem);
-                               mutex_lock(&inode->i_mutex);
-                               mutex_unlock(&inode->i_mutex);
-                               return VM_FAULT_RETRY;
+                               ret = VM_FAULT_RETRY;
                        }
-                       /* cond_resched? Leave that to GUP or return to user */
-                       return VM_FAULT_NOPAGE;
+
+                       shmem_falloc_waitq = shmem_falloc->waitq;
+                       prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
+                                       TASK_UNINTERRUPTIBLE);
+                       spin_unlock(&inode->i_lock);
+                       schedule();
+
+                       /*
+                        * shmem_falloc_waitq points into the shmem_fallocate()
+                        * stack of the hole-punching task: shmem_falloc_waitq
+                        * is usually invalid by the time we reach here, but
+                        * finish_wait() does not dereference it in that case;
+                        * though i_lock needed lest racing with wake_up_all().
+                        */
+                       spin_lock(&inode->i_lock);
+                       finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
+                       spin_unlock(&inode->i_lock);
+                       return ret;
                }
+               spin_unlock(&inode->i_lock);
        }
 
        error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
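
A hedged userspace sketch of the scenario this guards against, not part of the patch and assuming /dev/shm is a tmpfs mount (older toolchains may need <linux/falloc.h> for the FALLOC_FL_* flags; error checks omitted): one process keeps faulting pages of a shared mapping back in while another punches a hole over the same range. With the on-stack wait queue above, those faults sleep until the punch finishes instead of starving it.

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <signal.h>
	#include <stdlib.h>
	#include <sys/mman.h>
	#include <unistd.h>

	#define LEN (64UL << 20)

	int main(void)
	{
		int fd = open("/dev/shm/punchme", O_RDWR | O_CREAT | O_TRUNC, 0600);
		char *map;
		pid_t pid;

		ftruncate(fd, LEN);
		map = mmap(NULL, LEN, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

		pid = fork();
		if (pid == 0)				/* child: keep faulting pages back in */
			for (;;)
				for (size_t i = 0; i < LEN; i += 4096)
					map[i] = 1;

		/* parent: the hole-punch must complete even while the child faults */
		fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, LEN);
		kill(pid, SIGKILL);
		return 0;
	}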
@@ -1355,6 +1421,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
                info = SHMEM_I(inode);
                memset(info, 0, (char *)inode - (char *)info);
                spin_lock_init(&info->lock);
+               info->seals = F_SEAL_SEAL;
                info->flags = flags & VM_NORESERVE;
                INIT_LIST_HEAD(&info->swaplist);
                simple_xattrs_init(&info->xattrs);
@@ -1412,13 +1479,19 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
 {
-       int ret;
        struct inode *inode = mapping->host;
+       struct shmem_inode_info *info = SHMEM_I(inode);
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
-       ret = shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
-       if (ret == 0 && *pagep)
-               init_page_accessed(*pagep);
-       return ret;
+
+       /* i_mutex is held by caller */
+       if (unlikely(info->seals)) {
+               if (info->seals & F_SEAL_WRITE)
+                       return -EPERM;
+               if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
+                       return -EPERM;
+       }
+
+       return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
 }
 
 static int
@@ -1755,11 +1828,233 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
        return offset;
 }
 
+/*
+ * We need a tag: a new tag would expand every radix_tree_node by 8 bytes,
+ * so reuse a tag which we firmly believe is never set or cleared on shmem.
+ */
+#define SHMEM_TAG_PINNED        PAGECACHE_TAG_TOWRITE
+#define LAST_SCAN               4       /* about 150ms max */
+
+static void shmem_tag_pins(struct address_space *mapping)
+{
+       struct radix_tree_iter iter;
+       void **slot;
+       pgoff_t start;
+       struct page *page;
+
+       lru_add_drain();
+       start = 0;
+       rcu_read_lock();
+
+restart:
+       radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
+               page = radix_tree_deref_slot(slot);
+               if (!page || radix_tree_exception(page)) {
+                       if (radix_tree_deref_retry(page))
+                               goto restart;
+               } else if (page_count(page) - page_mapcount(page) > 1) {
+                       spin_lock_irq(&mapping->tree_lock);
+                       radix_tree_tag_set(&mapping->page_tree, iter.index,
+                                          SHMEM_TAG_PINNED);
+                       spin_unlock_irq(&mapping->tree_lock);
+               }
+
+               if (need_resched()) {
+                       cond_resched_rcu();
+                       start = iter.index + 1;
+                       goto restart;
+               }
+       }
+       rcu_read_unlock();
+}
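
For reference on the test above: a shmem page that sits only in the page cache and is mapped by N processes has page_count() == N + 1 (one reference from the radix tree plus one per mapping) while page_mapcount() == N, so the difference is exactly 1 once transient references have been flushed, which is what the lru_add_drain() at the top is for. Any larger difference means someone else still holds the page, typically a get_user_pages() pin from direct-IO or AIO, and the page gets tagged SHMEM_TAG_PINNED.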
+
+/*
+ * Setting SEAL_WRITE requires us to verify there's no pending writer. However,
+ * via get_user_pages(), drivers might have some pending I/O without any active
+ * user-space mappings (eg., direct-IO, AIO). Therefore, we look at all pages
+ * and see whether it has an elevated ref-count. If so, we tag them and wait for
+ * them to be dropped.
+ * The caller must guarantee that no new user will acquire writable references
+ * to those pages to avoid races.
+ */
+static int shmem_wait_for_pins(struct address_space *mapping)
+{
+       struct radix_tree_iter iter;
+       void **slot;
+       pgoff_t start;
+       struct page *page;
+       int error, scan;
+
+       shmem_tag_pins(mapping);
+
+       error = 0;
+       for (scan = 0; scan <= LAST_SCAN; scan++) {
+               if (!radix_tree_tagged(&mapping->page_tree, SHMEM_TAG_PINNED))
+                       break;
+
+               if (!scan)
+                       lru_add_drain_all();
+               else if (schedule_timeout_killable((HZ << scan) / 200))
+                       scan = LAST_SCAN;
+
+               start = 0;
+               rcu_read_lock();
+restart:
+               radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter,
+                                          start, SHMEM_TAG_PINNED) {
+
+                       page = radix_tree_deref_slot(slot);
+                       if (radix_tree_exception(page)) {
+                               if (radix_tree_deref_retry(page))
+                                       goto restart;
+
+                               page = NULL;
+                       }
+
+                       if (page &&
+                           page_count(page) - page_mapcount(page) != 1) {
+                               if (scan < LAST_SCAN)
+                                       goto continue_resched;
+
+                               /*
+                                * On the last scan, we clean up all those tags
+                                * we inserted; but make a note that we still
+                                * found pages pinned.
+                                */
+                               error = -EBUSY;
+                       }
+
+                       spin_lock_irq(&mapping->tree_lock);
+                       radix_tree_tag_clear(&mapping->page_tree,
+                                            iter.index, SHMEM_TAG_PINNED);
+                       spin_unlock_irq(&mapping->tree_lock);
+continue_resched:
+                       if (need_resched()) {
+                               cond_resched_rcu();
+                               start = iter.index + 1;
+                               goto restart;
+                       }
+               }
+               rcu_read_unlock();
+       }
+
+       return error;
+}
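
The back-off arithmetic above: scan 0 only drains the LRU pagevecs, then scans 1 through 4 sleep (HZ << scan) / 200 jiffies, i.e. nominally 10ms, 20ms, 40ms and 80ms (modulo jiffy rounding), roughly 150ms in total, which is where the "about 150ms max" note on LAST_SCAN comes from. A fatal signal makes schedule_timeout_killable() return early and forces scan to LAST_SCAN, so the final pass that clears the tags (and reports -EBUSY if pins remain) still runs.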
+
+#define F_ALL_SEALS (F_SEAL_SEAL | \
+                    F_SEAL_SHRINK | \
+                    F_SEAL_GROW | \
+                    F_SEAL_WRITE)
+
+int shmem_add_seals(struct file *file, unsigned int seals)
+{
+       struct inode *inode = file_inode(file);
+       struct shmem_inode_info *info = SHMEM_I(inode);
+       int error;
+
+       /*
+        * SEALING
+        * Sealing allows multiple parties to share a shmem-file but restrict
+        * access to a specific subset of file operations. Seals can only be
+        * added, but never removed. This way, mutually untrusted parties can
+        * share common memory regions with a well-defined policy. A malicious
+        * peer can thus never perform unwanted operations on a shared object.
+        *
+        * Seals are only supported on special shmem-files and always affect
+        * the whole underlying inode. Once a seal is set, it may prevent some
+        * kinds of access to the file. Currently, the following seals are
+        * defined:
+        *   SEAL_SEAL: Prevent further seals from being set on this file
+        *   SEAL_SHRINK: Prevent the file from shrinking
+        *   SEAL_GROW: Prevent the file from growing
+        *   SEAL_WRITE: Prevent write access to the file
+        *
+        * As we don't require any trust relationship between two parties, we
+        * must prevent seals from being removed. Therefore, sealing a file
+        * only adds a given set of seals to the file, it never touches
+        * existing seals. Furthermore, the "setting seals"-operation can be
+        * sealed itself, which basically prevents any further seal from being
+        * added.
+        *
+        * Semantics of sealing are only defined on volatile files. Only
+        * anonymous shmem files support sealing. More importantly, seals are
+        * never written to disk. Therefore, there's no plan to support it on
+        * other file types.
+        */
+
+       if (file->f_op != &shmem_file_operations)
+               return -EINVAL;
+       if (!(file->f_mode & FMODE_WRITE))
+               return -EPERM;
+       if (seals & ~(unsigned int)F_ALL_SEALS)
+               return -EINVAL;
+
+       mutex_lock(&inode->i_mutex);
+
+       if (info->seals & F_SEAL_SEAL) {
+               error = -EPERM;
+               goto unlock;
+       }
+
+       if ((seals & F_SEAL_WRITE) && !(info->seals & F_SEAL_WRITE)) {
+               error = mapping_deny_writable(file->f_mapping);
+               if (error)
+                       goto unlock;
+
+               error = shmem_wait_for_pins(file->f_mapping);
+               if (error) {
+                       mapping_allow_writable(file->f_mapping);
+                       goto unlock;
+               }
+       }
+
+       info->seals |= seals;
+       error = 0;
+
+unlock:
+       mutex_unlock(&inode->i_mutex);
+       return error;
+}
+EXPORT_SYMBOL_GPL(shmem_add_seals);
+
+int shmem_get_seals(struct file *file)
+{
+       if (file->f_op != &shmem_file_operations)
+               return -EINVAL;
+
+       return SHMEM_I(file_inode(file))->seals;
+}
+EXPORT_SYMBOL_GPL(shmem_get_seals);
+
+long shmem_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       long error;
+
+       switch (cmd) {
+       case F_ADD_SEALS:
+               /* disallow upper 32bit */
+               if (arg > UINT_MAX)
+                       return -EINVAL;
+
+               error = shmem_add_seals(file, arg);
+               break;
+       case F_GET_SEALS:
+               error = shmem_get_seals(file);
+               break;
+       default:
+               error = -EINVAL;
+               break;
+       }
+
+       return error;
+}
+
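
A hedged userspace sketch of the sealing API wired up above, not part of the patch: it assumes a libc that exposes memfd_create() in <sys/mman.h> and the F_ADD_SEALS / F_GET_SEALS / F_SEAL_* constants in <fcntl.h> (older toolchains can take them from <linux/fcntl.h> and call the raw syscall shown after memfd_create() below). Error checks are omitted for brevity.

	#define _GNU_SOURCE
	#include <assert.h>
	#include <errno.h>
	#include <fcntl.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = memfd_create("buf", MFD_CLOEXEC | MFD_ALLOW_SEALING);

		ftruncate(fd, 4096);

		/* freeze the size: shrinking and growing now both fail */
		fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW);
		assert(fcntl(fd, F_GET_SEALS) == (F_SEAL_SHRINK | F_SEAL_GROW));
		assert(ftruncate(fd, 0) == -1 && errno == EPERM);

		/* finally forbid adding any further seals */
		fcntl(fd, F_ADD_SEALS, F_SEAL_SEAL);
		return 0;
	}

Without MFD_ALLOW_SEALING the file starts with F_SEAL_SEAL already set, so any later F_ADD_SEALS fails with EPERM.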
 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                                                         loff_t len)
 {
        struct inode *inode = file_inode(file);
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+       struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_falloc shmem_falloc;
        pgoff_t start, index, end;
        int error;
@@ -1769,13 +2064,19 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 
        mutex_lock(&inode->i_mutex);
 
-       shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
-
        if (mode & FALLOC_FL_PUNCH_HOLE) {
                struct address_space *mapping = file->f_mapping;
                loff_t unmap_start = round_up(offset, PAGE_SIZE);
                loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
+               DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
+
+               /* protected by i_mutex */
+               if (info->seals & F_SEAL_WRITE) {
+                       error = -EPERM;
+                       goto out;
+               }
 
+               shmem_falloc.waitq = &shmem_falloc_waitq;
                shmem_falloc.start = unmap_start >> PAGE_SHIFT;
                shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
                spin_lock(&inode->i_lock);
@@ -1787,8 +2088,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                                            1 + unmap_end - unmap_start, 0);
                shmem_truncate_range(inode, offset, offset + len - 1);
                /* No need to unmap again: hole-punching leaves COWed pages */
+
+               spin_lock(&inode->i_lock);
+               inode->i_private = NULL;
+               wake_up_all(&shmem_falloc_waitq);
+               spin_unlock(&inode->i_lock);
                error = 0;
-               goto undone;
+               goto out;
        }
 
        /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
@@ -1796,6 +2102,11 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
        if (error)
                goto out;
 
+       if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
+               error = -EPERM;
+               goto out;
+       }
+
        start = offset >> PAGE_CACHE_SHIFT;
        end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        /* Try to avoid a swapstorm if len is impossible to satisfy */
@@ -1804,6 +2115,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                goto out;
        }
 
+       shmem_falloc.waitq = NULL;
        shmem_falloc.start = start;
        shmem_falloc.next  = start;
        shmem_falloc.nr_falloced = 0;
@@ -2530,6 +2842,77 @@ static int shmem_show_options(struct seq_file *seq, struct dentry *root)
        shmem_show_mpol(seq, sbinfo->mpol);
        return 0;
 }
+
+#define MFD_NAME_PREFIX "memfd:"
+#define MFD_NAME_PREFIX_LEN (sizeof(MFD_NAME_PREFIX) - 1)
+#define MFD_NAME_MAX_LEN (NAME_MAX - MFD_NAME_PREFIX_LEN)
+
+#define MFD_ALL_FLAGS (MFD_CLOEXEC | MFD_ALLOW_SEALING)
+
+SYSCALL_DEFINE2(memfd_create,
+               const char __user *, uname,
+               unsigned int, flags)
+{
+       struct shmem_inode_info *info;
+       struct file *file;
+       int fd, error;
+       char *name;
+       long len;
+
+       if (flags & ~(unsigned int)MFD_ALL_FLAGS)
+               return -EINVAL;
+
+       /* length includes terminating zero */
+       len = strnlen_user(uname, MFD_NAME_MAX_LEN + 1);
+       if (len <= 0)
+               return -EFAULT;
+       if (len > MFD_NAME_MAX_LEN + 1)
+               return -EINVAL;
+
+       name = kmalloc(len + MFD_NAME_PREFIX_LEN, GFP_TEMPORARY);
+       if (!name)
+               return -ENOMEM;
+
+       strcpy(name, MFD_NAME_PREFIX);
+       if (copy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, len)) {
+               error = -EFAULT;
+               goto err_name;
+       }
+
+       /* terminating-zero may have changed after strnlen_user() returned */
+       if (name[len + MFD_NAME_PREFIX_LEN - 1]) {
+               error = -EFAULT;
+               goto err_name;
+       }
+
+       fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? O_CLOEXEC : 0);
+       if (fd < 0) {
+               error = fd;
+               goto err_name;
+       }
+
+       file = shmem_file_setup(name, 0, VM_NORESERVE);
+       if (IS_ERR(file)) {
+               error = PTR_ERR(file);
+               goto err_fd;
+       }
+       info = SHMEM_I(file_inode(file));
+       file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
+       file->f_flags |= O_RDWR | O_LARGEFILE;
+       if (flags & MFD_ALLOW_SEALING)
+               info->seals &= ~F_SEAL_SEAL;
+
+       fd_install(fd, file);
+       kfree(name);
+       return fd;
+
+err_fd:
+       put_unused_fd(fd);
+err_name:
+       kfree(name);
+       return error;
+}
+
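
At the time of this series no libc wrapper existed, so userspace would invoke the new syscall directly. A minimal sketch, assuming kernel headers that provide __NR_memfd_create and the uapi <linux/memfd.h> added by this patch:

	#define _GNU_SOURCE
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/memfd.h>	/* MFD_CLOEXEC, MFD_ALLOW_SEALING */

	/* thin wrapper for toolchains whose libc lacks memfd_create() */
	static int memfd_create_raw(const char *name, unsigned int flags)
	{
		return syscall(__NR_memfd_create, name, flags);
	}

The name is only an identifier for debugging: it is limited to NAME_MAX minus the "memfd:" prefix, duplicates are allowed, and the prefixed name shows up in /proc/<pid>/fd and /proc/<pid>/maps for the unlinked inode.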
 #endif /* CONFIG_TMPFS */
 
 static void shmem_put_super(struct super_block *sb)
@@ -2895,16 +3278,16 @@ static struct file *__shmem_file_setup(const char *name, loff_t size,
        this.len = strlen(name);
        this.hash = 0; /* will go */
        sb = shm_mnt->mnt_sb;
+       path.mnt = mntget(shm_mnt);
        path.dentry = d_alloc_pseudo(sb, &this);
        if (!path.dentry)
                goto put_memory;
        d_set_d_op(path.dentry, &anon_ops);
-       path.mnt = mntget(shm_mnt);
 
        res = ERR_PTR(-ENOSPC);
        inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
        if (!inode)
-               goto put_dentry;
+               goto put_memory;
 
        inode->i_flags |= i_flags;
        d_instantiate(path.dentry, inode);
@@ -2912,19 +3295,19 @@ static struct file *__shmem_file_setup(const char *name, loff_t size,
        clear_nlink(inode);     /* It is unlinked */
        res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
        if (IS_ERR(res))
-               goto put_dentry;
+               goto put_path;
 
        res = alloc_file(&path, FMODE_WRITE | FMODE_READ,
                  &shmem_file_operations);
        if (IS_ERR(res))
-               goto put_dentry;
+               goto put_path;
 
        return res;
 
-put_dentry:
-       path_put(&path);
 put_memory:
        shmem_unacct_size(flags, size);
+put_path:
+       path_put(&path);
        return res;
 }