fs: move i_sb_list out from under inode_lock
diff --git a/fs/inode.c b/fs/inode.c
index 0647d80..785b1ab 100644
--- a/fs/inode.c
+++ b/fs/inode.c
 #include <linux/async.h>
 #include <linux/posix_acl.h>
 #include <linux/ima.h>
+#include <linux/cred.h>
+
+/*
+ * inode locking rules.
+ *
+ * inode->i_lock protects:
+ *   inode->i_state, inode->i_hash, __iget()
+ * inode_lru_lock protects:
+ *   inode_lru, inode->i_lru
+ * inode_sb_list_lock protects:
+ *   sb->s_inodes, inode->i_sb_list
+ *
+ * Lock ordering:
+ * inode_lock
+ *   inode->i_lock
+ *
+ * inode_sb_list_lock
+ *   inode->i_lock
+ *     inode_lru_lock
+ */
 
 /*
  * This is needed for the following functions:
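As a sketch of the ordering documented in the new locking-rules comment above (illustrative only, not part of the patch; the function name is made up):

	static void example_walk_sb_inodes(struct super_block *sb)
	{
		struct inode *inode;

		spin_lock(&inode_sb_list_lock);
		list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
			spin_lock(&inode->i_lock);	/* nests inside inode_sb_list_lock */
			spin_lock(&inode_lru_lock);	/* innermost lock */
			/* ... examine inode->i_state and inode->i_lru here ... */
			spin_unlock(&inode_lru_lock);
			spin_unlock(&inode->i_lock);
		}
		spin_unlock(&inode_sb_list_lock);
	}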
@@ -73,6 +93,7 @@ static unsigned int i_hash_shift __read_mostly;
  */
 
 static LIST_HEAD(inode_lru);
+static DEFINE_SPINLOCK(inode_lru_lock);
 static struct hlist_head *inode_hashtable __read_mostly;
 
 /*
@@ -83,17 +104,16 @@ static struct hlist_head *inode_hashtable __read_mostly;
  */
 DEFINE_SPINLOCK(inode_lock);
 
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
+
 /*
- * iprune_sem provides exclusion between the kswapd or try_to_free_pages
- * icache shrinking path, and the umount path.  Without this exclusion,
- * by the time prune_icache calls iput for the inode whose pages it has
- * been invalidating, or by the time it calls clear_inode & destroy_inode
- * from its final dispose_list, the struct super_block they refer to
- * (for inode->i_sb->s_op) may already have been freed and reused.
+ * iprune_sem provides exclusion between the icache shrinking and the
+ * umount path.
  *
- * We make this an rwsem because the fastpath is icache shrinking. In
- * some cases a filesystem may be doing a significant amount of work in
- * its inode reclaim code, so this should improve parallelism.
+ * We don't actually need it to protect anything in the umount path,
+ * but only need to cycle through it to make sure any inode that
+ * prune_icache took off the LRU list has been fully torn down by the
+ * time we are past evict_inodes.
  */
 static DECLARE_RWSEM(iprune_sem);
 
@@ -139,15 +159,6 @@ int proc_nr_inodes(ctl_table *table, int write,
 }
 #endif
 
-static void wake_up_inode(struct inode *inode)
-{
-       /*
-        * Prevent speculative execution through spin_unlock(&inode_lock);
-        */
-       smp_mb();
-       wake_up_bit(&inode->i_state, __I_NEW);
-}
-
 /**
  * inode_init_always - perform inode structure initialisation
  * @sb: superblock inode belongs to
@@ -338,7 +349,7 @@ static void init_once(void *foo)
 }
 
 /*
- * inode_lock must be held
+ * inode->i_lock must be held
  */
 void __iget(struct inode *inode)
 {
@@ -356,23 +367,22 @@ EXPORT_SYMBOL(ihold);
 
 static void inode_lru_list_add(struct inode *inode)
 {
+       spin_lock(&inode_lru_lock);
        if (list_empty(&inode->i_lru)) {
                list_add(&inode->i_lru, &inode_lru);
                inodes_stat.nr_unused++;
        }
+       spin_unlock(&inode_lru_lock);
 }
 
 static void inode_lru_list_del(struct inode *inode)
 {
+       spin_lock(&inode_lru_lock);
        if (!list_empty(&inode->i_lru)) {
                list_del_init(&inode->i_lru);
                inodes_stat.nr_unused--;
        }
-}
-
-static inline void __inode_sb_list_add(struct inode *inode)
-{
-       list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
+       spin_unlock(&inode_lru_lock);
 }
 
 /**
@@ -381,15 +391,17 @@ static inline void __inode_sb_list_add(struct inode *inode)
  */
 void inode_sb_list_add(struct inode *inode)
 {
-       spin_lock(&inode_lock);
-       __inode_sb_list_add(inode);
-       spin_unlock(&inode_lock);
+       spin_lock(&inode_sb_list_lock);
+       list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
+       spin_unlock(&inode_sb_list_lock);
 }
 EXPORT_SYMBOL_GPL(inode_sb_list_add);
 
-static inline void __inode_sb_list_del(struct inode *inode)
+static inline void inode_sb_list_del(struct inode *inode)
 {
+       spin_lock(&inode_sb_list_lock);
        list_del_init(&inode->i_sb_list);
+       spin_unlock(&inode_sb_list_lock);
 }
 
 static unsigned long hash(struct super_block *sb, unsigned long hashval)
@@ -415,22 +427,13 @@ void __insert_inode_hash(struct inode *inode, unsigned long hashval)
        struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);
 
        spin_lock(&inode_lock);
+       spin_lock(&inode->i_lock);
        hlist_add_head(&inode->i_hash, b);
+       spin_unlock(&inode->i_lock);
        spin_unlock(&inode_lock);
 }
 EXPORT_SYMBOL(__insert_inode_hash);
 
-/**
- *     __remove_inode_hash - remove an inode from the hash
- *     @inode: inode to unhash
- *
- *     Remove an inode from the superblock.
- */
-static void __remove_inode_hash(struct inode *inode)
-{
-       hlist_del_init(&inode->i_hash);
-}
-
 /**
  *     remove_inode_hash - remove an inode from the hash
  *     @inode: inode to unhash
@@ -440,7 +443,9 @@ static void __remove_inode_hash(struct inode *inode)
 void remove_inode_hash(struct inode *inode)
 {
        spin_lock(&inode_lock);
+       spin_lock(&inode->i_lock);
        hlist_del_init(&inode->i_hash);
+       spin_unlock(&inode->i_lock);
        spin_unlock(&inode_lock);
 }
 EXPORT_SYMBOL(remove_inode_hash);
@@ -458,10 +463,32 @@ void end_writeback(struct inode *inode)
 }
 EXPORT_SYMBOL(end_writeback);
 
+/*
+ * Free the inode passed in, removing it from the lists it is still connected
+ * to. We remove any pages still attached to the inode and wait for any IO that
+ * is still in progress before finally destroying the inode.
+ *
+ * An inode must already be marked I_FREEING so that we avoid the inode being
+ * moved back onto lists if we race with other code that manipulates the lists
+ * (e.g. writeback_single_inode). The caller is responsible for setting this.
+ *
+ * An inode must already be removed from the LRU list before being evicted from
+ * the cache. This should occur atomically with setting the I_FREEING state
+ * flag, so no inodes here should ever be on the LRU when being evicted.
+ */
 static void evict(struct inode *inode)
 {
        const struct super_operations *op = inode->i_sb->s_op;
 
+       BUG_ON(!(inode->i_state & I_FREEING));
+       BUG_ON(!list_empty(&inode->i_lru));
+
+       spin_lock(&inode_lock);
+       list_del_init(&inode->i_wb_list);
+       spin_unlock(&inode_lock);
+
+       inode_sb_list_del(inode);
+
        if (op->evict_inode) {
                op->evict_inode(inode);
        } else {
@@ -473,6 +500,15 @@ static void evict(struct inode *inode)
                bd_forget(inode);
        if (S_ISCHR(inode->i_mode) && inode->i_cdev)
                cd_forget(inode);
+
+       remove_inode_hash(inode);
+
+       spin_lock(&inode->i_lock);
+       wake_up_bit(&inode->i_state, __I_NEW);
+       BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
+       spin_unlock(&inode->i_lock);
+
+       destroy_inode(inode);
 }
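
The caller-side contract, as a minimal sketch (this mirrors what iput_final() does at the end of this patch):

	spin_lock(&inode->i_lock);
	inode->i_state |= I_FREEING;	/* writeback can no longer requeue it */
	inode_lru_list_del(inode);	/* takes inode_lru_lock inside i_lock */
	spin_unlock(&inode->i_lock);

	evict(inode);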
 
 /*
@@ -491,14 +527,6 @@ static void dispose_list(struct list_head *head)
                list_del_init(&inode->i_lru);
 
                evict(inode);
-
-               spin_lock(&inode_lock);
-               __remove_inode_hash(inode);
-               __inode_sb_list_del(inode);
-               spin_unlock(&inode_lock);
-
-               wake_up_inode(inode);
-               destroy_inode(inode);
        }
 }
 
@@ -516,32 +544,32 @@ void evict_inodes(struct super_block *sb)
        struct inode *inode, *next;
        LIST_HEAD(dispose);
 
-       down_write(&iprune_sem);
-
-       spin_lock(&inode_lock);
+       spin_lock(&inode_sb_list_lock);
        list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
                if (atomic_read(&inode->i_count))
                        continue;
 
+               spin_lock(&inode->i_lock);
                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
-                       WARN_ON(1);
+                       spin_unlock(&inode->i_lock);
                        continue;
                }
 
                inode->i_state |= I_FREEING;
-
-               /*
-                * Move the inode off the IO lists and LRU once I_FREEING is
-                * set so that it won't get moved back on there if it is dirty.
-                */
-               list_move(&inode->i_lru, &dispose);
-               list_del_init(&inode->i_wb_list);
-               if (!(inode->i_state & (I_DIRTY | I_SYNC)))
-                       inodes_stat.nr_unused--;
+               inode_lru_list_del(inode);
+               spin_unlock(&inode->i_lock);
+               list_add(&inode->i_lru, &dispose);
        }
-       spin_unlock(&inode_lock);
+       spin_unlock(&inode_sb_list_lock);
 
        dispose_list(&dispose);
+
+       /*
+        * Cycle through iprune_sem to make sure any inode that prune_icache
+        * moved off the list before we took the lock has been fully torn
+        * down.
+        */
+       down_write(&iprune_sem);
        up_write(&iprune_sem);
 }
 
@@ -561,36 +589,32 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
        struct inode *inode, *next;
        LIST_HEAD(dispose);
 
-       down_write(&iprune_sem);
-
-       spin_lock(&inode_lock);
+       spin_lock(&inode_sb_list_lock);
        list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
-               if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE))
+               spin_lock(&inode->i_lock);
+               if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
+                       spin_unlock(&inode->i_lock);
                        continue;
+               }
                if (inode->i_state & I_DIRTY && !kill_dirty) {
+                       spin_unlock(&inode->i_lock);
                        busy = 1;
                        continue;
                }
                if (atomic_read(&inode->i_count)) {
+                       spin_unlock(&inode->i_lock);
                        busy = 1;
                        continue;
                }
 
                inode->i_state |= I_FREEING;
-
-               /*
-                * Move the inode off the IO lists and LRU once I_FREEING is
-                * set so that it won't get moved back on there if it is dirty.
-                */
-               list_move(&inode->i_lru, &dispose);
-               list_del_init(&inode->i_wb_list);
-               if (!(inode->i_state & (I_DIRTY | I_SYNC)))
-                       inodes_stat.nr_unused--;
+               inode_lru_list_del(inode);
+               spin_unlock(&inode->i_lock);
+               list_add(&inode->i_lru, &dispose);
        }
-       spin_unlock(&inode_lock);
+       spin_unlock(&inode_sb_list_lock);
 
        dispose_list(&dispose);
-       up_write(&iprune_sem);
 
        return busy;
 }
@@ -610,7 +634,7 @@ static int can_unuse(struct inode *inode)
 
 /*
  * Scan `nr_to_scan' inodes on the unused list for freeable ones. They are moved to a
- * temporary list and then are freed outside inode_lock by dispose_list().
+ * temporary list and then are freed outside inode_lru_lock by dispose_list().
  *
  * Any inodes which are pinned purely because of attached pagecache have their
  * pagecache removed.  If the inode has metadata buffers attached to
@@ -631,7 +655,7 @@ static void prune_icache(int nr_to_scan)
        unsigned long reap = 0;
 
        down_read(&iprune_sem);
-       spin_lock(&inode_lock);
+       spin_lock(&inode_lru_lock);
        for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
                struct inode *inode;
 
@@ -640,6 +664,16 @@ static void prune_icache(int nr_to_scan)
 
                inode = list_entry(inode_lru.prev, struct inode, i_lru);
 
+               /*
+                * We are inverting the inode_lru_lock/inode->i_lock order
+                * here, so use a trylock. If we fail to get the lock, just
+                * move the inode to the back of the list so we don't spin
+                * on it.
+                */
+               if (!spin_trylock(&inode->i_lock)) {
+                       list_move(&inode->i_lru, &inode_lru);
+                       continue;
+               }
+
                /*
                 * Referenced or dirty inodes are still in use. Give them
                 * another pass through the LRU as we cannot reclaim them now.
@@ -647,47 +681,51 @@ static void prune_icache(int nr_to_scan)
                if (atomic_read(&inode->i_count) ||
                    (inode->i_state & ~I_REFERENCED)) {
                        list_del_init(&inode->i_lru);
+                       spin_unlock(&inode->i_lock);
                        inodes_stat.nr_unused--;
                        continue;
                }
 
                /* recently referenced inodes get one more pass */
                if (inode->i_state & I_REFERENCED) {
-                       list_move(&inode->i_lru, &inode_lru);
                        inode->i_state &= ~I_REFERENCED;
+                       list_move(&inode->i_lru, &inode_lru);
+                       spin_unlock(&inode->i_lock);
                        continue;
                }
                if (inode_has_buffers(inode) || inode->i_data.nrpages) {
                        __iget(inode);
-                       spin_unlock(&inode_lock);
+                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&inode_lru_lock);
                        if (remove_inode_buffers(inode))
                                reap += invalidate_mapping_pages(&inode->i_data,
                                                                0, -1);
                        iput(inode);
-                       spin_lock(&inode_lock);
+                       spin_lock(&inode_lru_lock);
 
                        if (inode != list_entry(inode_lru.next,
                                                struct inode, i_lru))
                                continue;       /* wrong inode or list_empty */
-                       if (!can_unuse(inode))
+                       /* avoid lock inversions with trylock */
+                       if (!spin_trylock(&inode->i_lock))
                                continue;
+                       if (!can_unuse(inode)) {
+                               spin_unlock(&inode->i_lock);
+                               continue;
+                       }
                }
                WARN_ON(inode->i_state & I_NEW);
                inode->i_state |= I_FREEING;
+               spin_unlock(&inode->i_lock);
 
-               /*
-                * Move the inode off the IO lists and LRU once I_FREEING is
-                * set so that it won't get moved back on there if it is dirty.
-                */
                list_move(&inode->i_lru, &freeable);
-               list_del_init(&inode->i_wb_list);
                inodes_stat.nr_unused--;
        }
        if (current_is_kswapd())
                __count_vm_events(KSWAPD_INODESTEAL, reap);
        else
                __count_vm_events(PGINODESTEAL, reap);
-       spin_unlock(&inode_lock);
+       spin_unlock(&inode_lru_lock);
 
        dispose_list(&freeable);
        up_read(&iprune_sem);
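
Distilled, the trylock pattern used in the loop above looks like this (hypothetical names obj, lru, lru_lock; illustrative only):

	spin_lock(&lru_lock);
	for (scanned = 0; scanned < nr_to_scan; scanned++) {
		struct obj *obj;

		if (list_empty(&lru))
			break;
		obj = list_entry(lru.prev, struct obj, lru);

		/*
		 * The documented order is obj->lock before lru_lock, but we
		 * already hold lru_lock, so a plain spin_lock() could
		 * deadlock. Trylock instead, and rotate the object to the
		 * other end of the list on failure so we retry it later
		 * rather than spinning on it.
		 */
		if (!spin_trylock(&obj->lock)) {
			list_move(&obj->lru, &lru);
			continue;
		}
		/* ... obj state is now stable; reclaim or requeue ... */
		spin_unlock(&obj->lock);
	}
	spin_unlock(&lru_lock);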
@@ -740,11 +778,13 @@ repeat:
                        continue;
                if (!test(inode, data))
                        continue;
+               spin_lock(&inode->i_lock);
                if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
                        __wait_on_freeing_inode(inode);
                        goto repeat;
                }
                __iget(inode);
+               spin_unlock(&inode->i_lock);
                return inode;
        }
        return NULL;
@@ -766,11 +806,13 @@ repeat:
                        continue;
                if (inode->i_sb != sb)
                        continue;
+               spin_lock(&inode->i_lock);
                if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
                        __wait_on_freeing_inode(inode);
                        goto repeat;
                }
                __iget(inode);
+               spin_unlock(&inode->i_lock);
                return inode;
        }
        return NULL;
@@ -830,19 +872,26 @@ struct inode *new_inode(struct super_block *sb)
 {
        struct inode *inode;
 
-       spin_lock_prefetch(&inode_lock);
+       spin_lock_prefetch(&inode_sb_list_lock);
 
        inode = alloc_inode(sb);
        if (inode) {
-               spin_lock(&inode_lock);
-               __inode_sb_list_add(inode);
+               spin_lock(&inode->i_lock);
                inode->i_state = 0;
-               spin_unlock(&inode_lock);
+               spin_unlock(&inode->i_lock);
+               inode_sb_list_add(inode);
        }
        return inode;
 }
 EXPORT_SYMBOL(new_inode);
 
+/**
+ * unlock_new_inode - clear the I_NEW state and wake up any waiters
+ * @inode:     new inode to unlock
+ *
+ * Called when the inode is fully initialised to clear the new state of the
+ * inode and wake up anyone waiting for the inode to finish initialisation.
+ */
 void unlock_new_inode(struct inode *inode)
 {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -862,19 +911,11 @@ void unlock_new_inode(struct inode *inode)
                }
        }
 #endif
-       /*
-        * This is special!  We do not need the spinlock when clearing I_NEW,
-        * because we're guaranteed that nobody else tries to do anything about
-        * the state of the inode when it is locked, as we just created it (so
-        * there can be no old holders that haven't tested I_NEW).
-        * However we must emit the memory barrier so that other CPUs reliably
-        * see the clearing of I_NEW after the other inode initialisation has
-        * completed.
-        */
-       smp_mb();
+       spin_lock(&inode->i_lock);
        WARN_ON(!(inode->i_state & I_NEW));
        inode->i_state &= ~I_NEW;
-       wake_up_inode(inode);
+       wake_up_bit(&inode->i_state, __I_NEW);
+       spin_unlock(&inode->i_lock);
 }
 EXPORT_SYMBOL(unlock_new_inode);
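For reference, the waiter this pairs with is essentially what wait_on_inode() did at the time (sketch):

	/*
	 * Sleeps until unlock_new_inode() clears I_NEW and calls
	 * wake_up_bit(&inode->i_state, __I_NEW).
	 */
	wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE);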
 
@@ -903,9 +944,11 @@ static struct inode *get_new_inode(struct super_block *sb,
                        if (set(inode, data))
                                goto set_failed;
 
-                       hlist_add_head(&inode->i_hash, head);
-                       __inode_sb_list_add(inode);
+                       spin_lock(&inode->i_lock);
                        inode->i_state = I_NEW;
+                       hlist_add_head(&inode->i_hash, head);
+                       spin_unlock(&inode->i_lock);
+                       inode_sb_list_add(inode);
                        spin_unlock(&inode_lock);
 
                        /* Return the locked inode with I_NEW set, the
@@ -950,9 +993,11 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
                old = find_inode_fast(sb, head, ino);
                if (!old) {
                        inode->i_ino = ino;
-                       hlist_add_head(&inode->i_hash, head);
-                       __inode_sb_list_add(inode);
+                       spin_lock(&inode->i_lock);
                        inode->i_state = I_NEW;
+                       hlist_add_head(&inode->i_hash, head);
+                       spin_unlock(&inode->i_lock);
+                       inode_sb_list_add(inode);
                        spin_unlock(&inode_lock);
 
                        /* Return the locked inode with I_NEW set, the
@@ -1036,17 +1081,19 @@ EXPORT_SYMBOL(iunique);
 
 struct inode *igrab(struct inode *inode)
 {
-       spin_lock(&inode_lock);
-       if (!(inode->i_state & (I_FREEING|I_WILL_FREE)))
+       spin_lock(&inode->i_lock);
+       if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
                __iget(inode);
-       else
+               spin_unlock(&inode->i_lock);
+       } else {
+               spin_unlock(&inode->i_lock);
                /*
                 * Handle the case where s_op->clear_inode has not been
                 * called yet, and somebody is calling igrab
                 * while the inode is getting freed.
                 */
                inode = NULL;
-       spin_unlock(&inode_lock);
+       }
        return inode;
 }
 EXPORT_SYMBOL(igrab);
@@ -1274,7 +1321,6 @@ int insert_inode_locked(struct inode *inode)
        ino_t ino = inode->i_ino;
        struct hlist_head *head = inode_hashtable + hash(sb, ino);
 
-       inode->i_state |= I_NEW;
        while (1) {
                struct hlist_node *node;
                struct inode *old = NULL;
@@ -1284,16 +1330,23 @@ int insert_inode_locked(struct inode *inode)
                                continue;
                        if (old->i_sb != sb)
                                continue;
-                       if (old->i_state & (I_FREEING|I_WILL_FREE))
+                       spin_lock(&old->i_lock);
+                       if (old->i_state & (I_FREEING|I_WILL_FREE)) {
+                               spin_unlock(&old->i_lock);
                                continue;
+                       }
                        break;
                }
                if (likely(!node)) {
+                       spin_lock(&inode->i_lock);
+                       inode->i_state |= I_NEW;
                        hlist_add_head(&inode->i_hash, head);
+                       spin_unlock(&inode->i_lock);
                        spin_unlock(&inode_lock);
                        return 0;
                }
                __iget(old);
+               spin_unlock(&old->i_lock);
                spin_unlock(&inode_lock);
                wait_on_inode(old);
                if (unlikely(!inode_unhashed(old))) {
@@ -1311,8 +1364,6 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
        struct super_block *sb = inode->i_sb;
        struct hlist_head *head = inode_hashtable + hash(sb, hashval);
 
-       inode->i_state |= I_NEW;
-
        while (1) {
                struct hlist_node *node;
                struct inode *old = NULL;
@@ -1323,16 +1374,23 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
                                continue;
                        if (!test(old, data))
                                continue;
-                       if (old->i_state & (I_FREEING|I_WILL_FREE))
+                       spin_lock(&old->i_lock);
+                       if (old->i_state & (I_FREEING|I_WILL_FREE)) {
+                               spin_unlock(&old->i_lock);
                                continue;
+                       }
                        break;
                }
                if (likely(!node)) {
+                       spin_lock(&inode->i_lock);
+                       inode->i_state |= I_NEW;
                        hlist_add_head(&inode->i_hash, head);
+                       spin_unlock(&inode->i_lock);
                        spin_unlock(&inode_lock);
                        return 0;
                }
                __iget(old);
+               spin_unlock(&old->i_lock);
                spin_unlock(&inode_lock);
                wait_on_inode(old);
                if (unlikely(!inode_unhashed(old))) {
@@ -1378,47 +1436,35 @@ static void iput_final(struct inode *inode)
        const struct super_operations *op = inode->i_sb->s_op;
        int drop;
 
+       WARN_ON(inode->i_state & I_NEW);
+
        if (op && op->drop_inode)
                drop = op->drop_inode(inode);
        else
                drop = generic_drop_inode(inode);
 
+       if (!drop && (sb->s_flags & MS_ACTIVE)) {
+               inode->i_state |= I_REFERENCED;
+               if (!(inode->i_state & (I_DIRTY|I_SYNC)))
+                       inode_lru_list_add(inode);
+               spin_unlock(&inode->i_lock);
+               return;
+       }
+
        if (!drop) {
-               if (sb->s_flags & MS_ACTIVE) {
-                       inode->i_state |= I_REFERENCED;
-                       if (!(inode->i_state & (I_DIRTY|I_SYNC))) {
-                               inode_lru_list_add(inode);
-                       }
-                       spin_unlock(&inode_lock);
-                       return;
-               }
-               WARN_ON(inode->i_state & I_NEW);
                inode->i_state |= I_WILL_FREE;
-               spin_unlock(&inode_lock);
+               spin_unlock(&inode->i_lock);
                write_inode_now(inode, 1);
-               spin_lock(&inode_lock);
+               spin_lock(&inode->i_lock);
                WARN_ON(inode->i_state & I_NEW);
                inode->i_state &= ~I_WILL_FREE;
-               __remove_inode_hash(inode);
        }
 
-       WARN_ON(inode->i_state & I_NEW);
        inode->i_state |= I_FREEING;
-
-       /*
-        * Move the inode off the IO lists and LRU once I_FREEING is
-        * set so that it won't get moved back on there if it is dirty.
-        */
        inode_lru_list_del(inode);
-       list_del_init(&inode->i_wb_list);
+       spin_unlock(&inode->i_lock);
 
-       __inode_sb_list_del(inode);
-       spin_unlock(&inode_lock);
        evict(inode);
-       remove_inode_hash(inode);
-       wake_up_inode(inode);
-       BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
-       destroy_inode(inode);
 }
 
 /**
@@ -1435,7 +1481,7 @@ void iput(struct inode *inode)
        if (inode) {
                BUG_ON(inode->i_state & I_CLEAR);
 
-               if (atomic_dec_and_lock(&inode->i_count, &inode_lock))
+               if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock))
                        iput_final(inode);
        }
 }
@@ -1614,9 +1660,8 @@ EXPORT_SYMBOL(inode_wait);
  * to recheck inode state.
  *
  * It doesn't matter if I_NEW is not set initially, a call to
- * wake_up_inode() after removing from the hash list will DTRT.
- *
- * This is called with inode_lock held.
+ * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
+ * will DTRT.
  */
 static void __wait_on_freeing_inode(struct inode *inode)
 {
@@ -1624,6 +1669,7 @@ static void __wait_on_freeing_inode(struct inode *inode)
        DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
        wq = bit_waitqueue(&inode->i_state, __I_NEW);
        prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
+       spin_unlock(&inode->i_lock);
        spin_unlock(&inode_lock);
        schedule();
        finish_wait(wq, &wait.wait);
@@ -1719,7 +1765,7 @@ void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
 EXPORT_SYMBOL(init_special_inode);
 
 /**
- * Init uid,gid,mode for new inode according to posix standards
+ * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
  * @inode: New inode
  * @dir: Directory inode
  * @mode: mode of the new inode
@@ -1737,3 +1783,22 @@ void inode_init_owner(struct inode *inode, const struct inode *dir,
        inode->i_mode = mode;
 }
 EXPORT_SYMBOL(inode_init_owner);
+
+/**
+ * inode_owner_or_capable - check current task permissions to inode
+ * @inode: inode being checked
+ *
+ * Return true if current either has CAP_FOWNER in the inode's user
+ * namespace, or owns the file.
+ */
+bool inode_owner_or_capable(const struct inode *inode)
+{
+       struct user_namespace *ns = inode_userns(inode);
+
+       if (current_user_ns() == ns && current_fsuid() == inode->i_uid)
+               return true;
+       if (ns_capable(ns, CAP_FOWNER))
+               return true;
+       return false;
+}
+EXPORT_SYMBOL(inode_owner_or_capable);
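
A typical call site is a permission gate in an owner-only operation; a hypothetical sketch:

	/* e.g. in an ioctl that changes per-inode flags */
	if (!inode_owner_or_capable(inode))
		return -EPERM;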