diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index c4661db..e2287c7 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -33,6 +33,7 @@
 #include "raid56.h"
 #include "locking.h"
 #include "free-space-cache.h"
+#include "free-space-tree.h"
 #include "math.h"
 #include "sysfs.h"
 #include "qgroup.h"
@@ -357,8 +358,8 @@ static void fragment_free_space(struct btrfs_root *root,
  * we need to check the pinned_extents for any extents that can't be used yet
  * since their free space will be released as soon as the transaction commits.
  */
-static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
-                             struct btrfs_fs_info *info, u64 start, u64 end)
+u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
+                      struct btrfs_fs_info *info, u64 start, u64 end)
 {
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;
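
add_new_free_space() loses its static qualifier above so that the new free
space tree code can reuse it while caching a block group. A matching
declaration has to become visible to free-space-tree.c; a minimal sketch,
assuming it lands in ctree.h (the declaration itself is not shown in this
diff):

u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                       struct btrfs_fs_info *info, u64 start, u64 end);
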
@@ -395,11 +396,10 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
        return total_added;
 }
 
-static noinline void caching_thread(struct btrfs_work *work)
+static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
 {
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
-       struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
@@ -407,17 +407,16 @@ static noinline void caching_thread(struct btrfs_work *work)
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
-       int ret = -ENOMEM;
+       int ret;
        bool wakeup = true;
 
-       caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;
 
        path = btrfs_alloc_path();
        if (!path)
-               goto out;
+               return -ENOMEM;
 
        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
 
@@ -438,20 +437,16 @@ static noinline void caching_thread(struct btrfs_work *work)
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
-       path->reada = 1;
+       path->reada = READA_FORWARD;
 
        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
-again:
-       mutex_lock(&caching_ctl->mutex);
-       /* need to make sure the commit_root doesn't disappear */
-       down_read(&fs_info->commit_root_sem);
 
 next:
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
-               goto err;
+               goto out;
 
        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
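
The path->reada assignments in this patch (here and at several call sites
below) replace the bare literal 1 with a named value. For reference, a sketch
of the readahead enum this relies on, as introduced in fs/btrfs/ctree.h by the
same cleanup series:

enum { READA_NONE = 0, READA_BACK, READA_FORWARD };

Note that READA_FORWARD is 2 where the old literal was 1; the consumers of
path->reada were presumably converted in the same series, so only the symbolic
meaning carries over.
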
@@ -477,12 +472,14 @@ next:
                                up_read(&fs_info->commit_root_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
-                               goto again;
+                               mutex_lock(&caching_ctl->mutex);
+                               down_read(&fs_info->commit_root_sem);
+                               goto next;
                        }
 
                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret < 0)
-                               goto err;
+                               goto out;
                        if (ret)
                                break;
                        leaf = path->nodes[0];
@@ -521,7 +518,7 @@ next:
                        else
                                last = key.objectid + key.offset;
 
-                       if (total_found > (1024 * 1024 * 2)) {
+                       if (total_found > CACHING_CTL_WAKE_UP) {
                                total_found = 0;
                                if (wakeup)
                                        wake_up(&caching_ctl->wait);
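
CACHING_CTL_WAKE_UP names the previously bare 2 MiB threshold at which the
caching thread wakes up waiters. A sketch of the definition this implies,
assuming it simply mirrors the old literal:

#define CACHING_CTL_WAKE_UP     SZ_2M
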
@@ -534,9 +531,37 @@ next:
        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
+       caching_ctl->progress = (u64)-1;
+
+out:
+       btrfs_free_path(path);
+       return ret;
+}
+
+static noinline void caching_thread(struct btrfs_work *work)
+{
+       struct btrfs_block_group_cache *block_group;
+       struct btrfs_fs_info *fs_info;
+       struct btrfs_caching_control *caching_ctl;
+       struct btrfs_root *extent_root;
+       int ret;
+
+       caching_ctl = container_of(work, struct btrfs_caching_control, work);
+       block_group = caching_ctl->block_group;
+       fs_info = block_group->fs_info;
+       extent_root = fs_info->extent_root;
+
+       mutex_lock(&caching_ctl->mutex);
+       down_read(&fs_info->commit_root_sem);
+
+       if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
+               ret = load_free_space_tree(caching_ctl);
+       else
+               ret = load_extent_tree_free(caching_ctl);
+
        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
-       block_group->cached = BTRFS_CACHE_FINISHED;
+       block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);
 
 #ifdef CONFIG_BTRFS_DEBUG
@@ -555,20 +580,11 @@ next:
 #endif
 
        caching_ctl->progress = (u64)-1;
-err:
-       btrfs_free_path(path);
-       up_read(&fs_info->commit_root_sem);
-
-       free_excluded_extents(extent_root, block_group);
 
+       up_read(&fs_info->commit_root_sem);
+       free_excluded_extents(fs_info->extent_root, block_group);
        mutex_unlock(&caching_ctl->mutex);
-out:
-       if (ret) {
-               spin_lock(&block_group->lock);
-               block_group->caching_ctl = NULL;
-               block_group->cached = BTRFS_CACHE_ERROR;
-               spin_unlock(&block_group->lock);
-       }
+
        wake_up(&caching_ctl->wait);
 
        put_caching_control(caching_ctl);
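
After this refactor, caching_thread() takes the caching mutex and
commit_root_sem once, dispatches to load_free_space_tree() or
load_extent_tree_free(), and centralizes the error handling
(BTRFS_CACHE_ERROR on any failure). The dispatch keys off the free space
tree's read-only-compat feature bit; a sketch of the definitions involved,
with the exact bit value assumed from the free space tree series:

/* fs/btrfs/ctree.h: the free space tree is a compat_ro feature. */
#define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE        (1ULL << 0)

/* btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) tests this bit in the
 * superblock's compat_ro_flags. */
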
@@ -680,8 +696,8 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
                }
        } else {
                /*
-                * We are not going to do the fast caching, set cached to the
-                * appropriate value and wakeup any waiters.
+                * We're either using the free space tree or no caching at all.
+                * Set cached to the appropriate value and wake up any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
@@ -2115,7 +2131,7 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
        if (!path)
                return -ENOMEM;
 
-       path->reada = 1;
+       path->reada = READA_FORWARD;
        path->leave_spinning = 1;
        /* this will setup the path even if it fails to insert the back ref */
        ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
@@ -2141,7 +2157,7 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);
 
-       path->reada = 1;
+       path->reada = READA_FORWARD;
        path->leave_spinning = 1;
        /* now insert the actual backref */
        ret = insert_extent_backref(trans, root->fs_info->extent_root,
@@ -2254,7 +2270,7 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
        }
 
 again:
-       path->reada = 1;
+       path->reada = READA_FORWARD;
        path->leave_spinning = 1;
        ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
                                path, 0, 1);
@@ -2910,6 +2926,9 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
        if (trans->aborted)
                return 0;
 
+       if (root->fs_info->creating_free_space_tree)
+               return 0;
+
        if (root == root->fs_info->extent_root)
                root = root->fs_info->tree_root;
 
@@ -2988,9 +3007,9 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
                return -ENOMEM;
 
        extent_op->flags_to_set = flags;
-       extent_op->update_flags = 1;
-       extent_op->update_key = 0;
-       extent_op->is_data = is_data ? 1 : 0;
+       extent_op->update_flags = true;
+       extent_op->update_key = false;
+       extent_op->is_data = is_data ? true : false;
        extent_op->level = level;
 
        ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
@@ -3328,7 +3347,7 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
         * If this block group is smaller than 100 megs don't bother caching the
         * block group.
         */
-       if (block_group->key.offset < (100 * 1024 * 1024)) {
+       if (block_group->key.offset < (100 * SZ_1M)) {
                spin_lock(&block_group->lock);
                block_group->disk_cache_state = BTRFS_DC_WRITTEN;
                spin_unlock(&block_group->lock);
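
The SZ_* constants used here and throughout the rest of the patch come from
include/linux/sizes.h and are exact binary sizes, so 100 * SZ_1M is 100 MiB
rather than 10^8 bytes. An excerpt covering the ones this patch uses:

#define SZ_64K  0x00010000
#define SZ_256K 0x00040000
#define SZ_1M   0x00100000
#define SZ_2M   0x00200000
#define SZ_16M  0x01000000
#define SZ_64M  0x04000000
#define SZ_256M 0x10000000
#define SZ_512M 0x20000000
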
@@ -3428,7 +3447,7 @@ again:
         * taking up quite a bit since it's not folded into the other space
         * cache.
         */
-       num_pages = div_u64(block_group->key.offset, 256 * 1024 * 1024);
+       num_pages = div_u64(block_group->key.offset, SZ_256M);
        if (!num_pages)
                num_pages = 1;
 
@@ -3684,11 +3703,21 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                return -ENOMEM;
 
        /*
-        * We don't need the lock here since we are protected by the transaction
-        * commit.  We want to do the cache_save_setup first and then run the
+        * Even though we are in the critical section of the transaction commit,
+        * we can still have concurrent tasks adding elements to this
+        * transaction's list of dirty block groups. These tasks correspond to
+        * endio free space workers started when writeback finishes for a
+        * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
+        * allocate new block groups as a result of COWing nodes of the root
+        * tree when updating the free space inode. The writeback for the space
+        * caches is triggered by an earlier call to
+        * btrfs_start_dirty_block_groups() and iterations of the following
+        * loop.
+        * Also we want to do the cache_save_setup first and then run the
         * delayed refs to make sure we have the best chance at doing this all
         * in one shot.
         */
+       spin_lock(&cur_trans->dirty_bgs_lock);
        while (!list_empty(&cur_trans->dirty_bgs)) {
                cache = list_first_entry(&cur_trans->dirty_bgs,
                                         struct btrfs_block_group_cache,
@@ -3700,11 +3729,13 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                 * finish and then do it all again
                 */
                if (!list_empty(&cache->io_list)) {
+                       spin_unlock(&cur_trans->dirty_bgs_lock);
                        list_del_init(&cache->io_list);
                        btrfs_wait_cache_io(root, trans, cache,
                                            &cache->io_ctl, path,
                                            cache->key.objectid);
                        btrfs_put_block_group(cache);
+                       spin_lock(&cur_trans->dirty_bgs_lock);
                }
 
                /*
@@ -3712,6 +3743,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                 * on any pending IO
                 */
                list_del_init(&cache->dirty_list);
+               spin_unlock(&cur_trans->dirty_bgs_lock);
                should_put = 1;
 
                cache_save_setup(cache, trans, path);
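
The dirty_bgs_lock hunks around this point all follow one discipline: hold the
spinlock only while inspecting or editing the dirty list, and drop it across
anything that can sleep (waiting on cache IO, cache_save_setup(), writing out
a cache group). A runnable userspace sketch of the same drop-and-relock
pattern, with a pthread mutex standing in for the kernel spinlock (all names
illustrative):

#include <pthread.h>
#include <stdio.h>

struct entry { struct entry *next; int id; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *head;

/* Stand-in for the blocking work (cache IO wait, write-out): may sleep. */
static void blocking_work(struct entry *e)
{
        printf("processing %d\n", e->id);
}

static void process_all(void)
{
        pthread_mutex_lock(&list_lock);
        while (head) {
                struct entry *e = head;

                head = e->next;                 /* unlink under the lock */
                pthread_mutex_unlock(&list_lock);
                blocking_work(e);               /* never block while locked */
                pthread_mutex_lock(&list_lock); /* retake before next peek */
        }
        pthread_mutex_unlock(&list_lock);
}

int main(void)
{
        struct entry a = { NULL, 1 }, b = { &a, 2 };

        head = &b;
        process_all();
        return 0;
}
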
@@ -3736,6 +3768,25 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                }
                if (!ret) {
                        ret = write_one_cache_group(trans, root, path, cache);
+                       /*
+                        * One of the free space endio workers might have
+                        * created a new block group while updating a free space
+                        * cache's inode (at inode.c:btrfs_finish_ordered_io())
+                        * and hasn't released its transaction handle yet, in
+                        * which case the new block group is still attached to
+                        * its transaction handle and its creation has not
+                        * finished yet (no block group item in the extent tree
+                        * yet, etc). If this is the case, wait for all free
+                        * space endio workers to finish and retry. This is a
+                        * very rare case so no need for a more efficient and
+                        * complex approach.
+                        */
+                       if (ret == -ENOENT) {
+                               wait_event(cur_trans->writer_wait,
+                                  atomic_read(&cur_trans->num_writers) == 1);
+                               ret = write_one_cache_group(trans, root, path,
+                                                           cache);
+                       }
                        if (ret)
                                btrfs_abort_transaction(trans, root, ret);
                }
@@ -3743,7 +3794,9 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                /* if its not on the io list, we need to put the block group */
                if (should_put)
                        btrfs_put_block_group(cache);
+               spin_lock(&cur_trans->dirty_bgs_lock);
        }
+       spin_unlock(&cur_trans->dirty_bgs_lock);
 
        while (!list_empty(io)) {
                cache = list_first_entry(io, struct btrfs_block_group_cache,
@@ -4086,8 +4139,10 @@ commit_trans:
                    !atomic_read(&root->fs_info->open_ioctl_trans)) {
                        need_commit--;
 
-                       if (need_commit > 0)
+                       if (need_commit > 0) {
+                               btrfs_start_delalloc_roots(fs_info, 0, -1);
                                btrfs_wait_ordered_roots(fs_info, -1);
+                       }
 
                        trans = btrfs_join_transaction(root);
                        if (IS_ERR(trans))
@@ -4100,11 +4155,12 @@ commit_trans:
                                if (ret)
                                        return ret;
                                /*
-                                * make sure that all running delayed iput are
-                                * done
+                                * The cleaner kthread might still be doing iput
+                                * operations. Wait for it to finish so that
+                                * more space is released.
                                 */
-                               down_write(&root->fs_info->delayed_iput_sem);
-                               up_write(&root->fs_info->delayed_iput_sem);
+                               mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
+                               mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
                                goto again;
                        } else {
                                btrfs_end_transaction(trans, root);
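
The lock/unlock pair with an empty critical section above is deliberate:
assuming the cleaner kthread holds cleaner_delayed_iput_mutex across its
delayed-iput pass (as the new comment implies), merely acquiring and releasing
the mutex here blocks until any in-flight pass completes. In sketch form, the
assumed counterpart in cleaner_kthread():

/*
 *      mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
 *      btrfs_run_delayed_iputs(root);
 *      mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
 *
 * so the lock+unlock in the hunk above acts as a wait-for-cleaner barrier.
 */
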
@@ -4239,14 +4295,13 @@ static int should_alloc_chunk(struct btrfs_root *root,
         */
        if (force == CHUNK_ALLOC_LIMITED) {
                thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
-               thresh = max_t(u64, 64 * 1024 * 1024,
-                              div_factor_fine(thresh, 1));
+               thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
 
                if (num_bytes - num_allocated < thresh)
                        return 1;
        }
 
-       if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
+       if (num_allocated + SZ_2M < div_factor(num_bytes, 8))
                return 0;
        return 1;
 }
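
Concretely, div_factor_fine(n, f) computes n * f / 100 and div_factor(n, f)
computes n * f / 10 (their ctree.h definitions), so CHUNK_ALLOC_LIMITED forces
a chunk once free space drops below max(64 MiB, 1% of the filesystem), and the
general case allocates only once roughly 80% of the existing chunk space is
used. A runnable userspace check of the arithmetic (the sizes are example
inputs, not values from the patch):

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-ins matching the kernel helpers' semantics. */
static uint64_t div_factor_fine(uint64_t num, int factor)
{
        return num * factor / 100;
}

static uint64_t div_factor(uint64_t num, int factor)
{
        return num * factor / 10;
}

int main(void)
{
        uint64_t total = 1ULL << 40;                 /* example: 1 TiB fs */
        uint64_t thresh = div_factor_fine(total, 1); /* 1% of the fs size */
        uint64_t num_bytes = 10ULL << 30;            /* example space_info */

        if (thresh < (64ULL << 20))
                thresh = 64ULL << 20;                /* max_t(u64, SZ_64M, ...) */

        printf("CHUNK_ALLOC_LIMITED: allocate once free < %llu (~%.1f GiB)\n",
               (unsigned long long)thresh, thresh / (double)(1ULL << 30));
        printf("otherwise: allocate once used + 2M >= %llu (80%% of %llu)\n",
               (unsigned long long)div_factor(num_bytes, 8),
               (unsigned long long)num_bytes);
        return 0;
}
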
@@ -4446,7 +4501,7 @@ out:
         * transaction.
         */
        if (trans->can_flush_pending_bgs &&
-           trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
+           trans->chunk_bytes_reserved >= (u64)SZ_2M) {
                btrfs_create_pending_block_groups(trans, trans->root);
                btrfs_trans_release_chunk_metadata(trans);
        }
@@ -4544,7 +4599,7 @@ static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
        return nr;
 }
 
-#define EXTENT_SIZE_PER_ITEM   (256 * 1024)
+#define EXTENT_SIZE_PER_ITEM   SZ_256K
 
 /*
  * shrink metadata reservation for delalloc
@@ -4749,8 +4804,7 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
        u64 expected;
        u64 to_reclaim;
 
-       to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024,
-                               16 * 1024 * 1024);
+       to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
        spin_lock(&space_info->lock);
        if (can_overcommit(root, space_info, to_reclaim,
                           BTRFS_RESERVE_FLUSH_ALL)) {
@@ -4761,8 +4815,7 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
        used = space_info->bytes_used + space_info->bytes_reserved +
               space_info->bytes_pinned + space_info->bytes_readonly +
               space_info->bytes_may_use;
-       if (can_overcommit(root, space_info, 1024 * 1024,
-                          BTRFS_RESERVE_FLUSH_ALL))
+       if (can_overcommit(root, space_info, SZ_1M, BTRFS_RESERVE_FLUSH_ALL))
                expected = div_factor_fine(space_info->total_bytes, 95);
        else
                expected = div_factor_fine(space_info->total_bytes, 90);
@@ -5318,7 +5371,7 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
        spin_lock(&sinfo->lock);
        spin_lock(&block_rsv->lock);
 
-       block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
+       block_rsv->size = min_t(u64, num_bytes, SZ_512M);
 
        num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
                    sinfo->bytes_reserved + sinfo->bytes_readonly +
@@ -6222,11 +6275,11 @@ fetch_cluster_info(struct btrfs_root *root, struct btrfs_space_info *space_info,
                return ret;
 
        if (ssd)
-               *empty_cluster = 2 * 1024 * 1024;
+               *empty_cluster = SZ_2M;
        if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
                ret = &root->fs_info->meta_alloc_cluster;
                if (!ssd)
-                       *empty_cluster = 64 * 1024;
+                       *empty_cluster = SZ_64K;
        } else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && ssd) {
                ret = &root->fs_info->data_alloc_cluster;
        }
@@ -6438,7 +6491,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
        if (!path)
                return -ENOMEM;
 
-       path->reada = 1;
+       path->reada = READA_FORWARD;
        path->leave_spinning = 1;
 
        is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
@@ -6661,6 +6714,13 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                        }
                }
 
+               ret = add_to_free_space_tree(trans, root->fs_info, bytenr,
+                                            num_bytes);
+               if (ret) {
+                       btrfs_abort_transaction(trans, extent_root, ret);
+                       goto out;
+               }
+
                ret = update_block_group(trans, root, bytenr, num_bytes, 0);
                if (ret) {
                        btrfs_abort_transaction(trans, extent_root, ret);
@@ -7672,6 +7732,11 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
        btrfs_mark_buffer_dirty(path->nodes[0]);
        btrfs_free_path(path);
 
+       ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
+                                         ins->offset);
+       if (ret)
+               return ret;
+
        ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
        if (ret) { /* -ENOENT, logic error */
                btrfs_err(fs_info, "update block group failed for %llu %llu",
@@ -7752,6 +7817,11 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
        btrfs_mark_buffer_dirty(leaf);
        btrfs_free_path(path);
 
+       ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
+                                         num_bytes);
+       if (ret)
+               return ret;
+
        ret = update_block_group(trans, root, ins->objectid, root->nodesize,
                                 1);
        if (ret) { /* -ENOENT, logic error */
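
Taken together with the __btrfs_free_extent() hunk further up, the free space
tree hooks added by this patch are symmetric: freeing an extent returns its
range to the tree, while both reserved-extent insertion paths remove the newly
allocated range. In summary:

/*
 *   __btrfs_free_extent()        -> add_to_free_space_tree(bytenr, num_bytes)
 *   alloc_reserved_file_extent() -> remove_from_free_space_tree(ins->objectid,
 *                                                               ins->offset)
 *   alloc_reserved_tree_block()  -> remove_from_free_space_tree(ins->objectid,
 *                                                               num_bytes)
 */
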
@@ -7834,7 +7904,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
        clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
 
        btrfs_set_lock_blocking(buf);
-       btrfs_set_buffer_uptodate(buf);
+       set_extent_buffer_uptodate(buf);
 
        if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
                buf->log_index = root->log_transid % 2;
@@ -7980,12 +8050,9 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
                else
                        memset(&extent_op->key, 0, sizeof(extent_op->key));
                extent_op->flags_to_set = flags;
-               if (skinny_metadata)
-                       extent_op->update_key = 0;
-               else
-                       extent_op->update_key = 1;
-               extent_op->update_flags = 1;
-               extent_op->is_data = 0;
+               extent_op->update_key = skinny_metadata ? false : true;
+               extent_op->update_flags = true;
+               extent_op->is_data = false;
                extent_op->level = level;
 
                ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
@@ -9124,7 +9191,7 @@ static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
        if ((sinfo->flags &
             (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
            !force)
-               min_allocable_bytes = 1 * 1024 * 1024;
+               min_allocable_bytes = SZ_1M;
        else
                min_allocable_bytes = 0;
 
@@ -9656,6 +9723,8 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
        cache->full_stripe_len = btrfs_full_stripe_len(root,
                                               &root->fs_info->mapping_tree,
                                               start);
+       set_free_space_tree_thresholds(cache);
+
        atomic_set(&cache->count, 1);
        spin_lock_init(&cache->lock);
        init_rwsem(&cache->data_rwsem);
@@ -9667,6 +9736,7 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
        INIT_LIST_HEAD(&cache->io_list);
        btrfs_init_free_space_ctl(cache);
        atomic_set(&cache->trimming, 0);
+       mutex_init(&cache->free_space_lock);
 
        return cache;
 }
@@ -9691,7 +9761,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
-       path->reada = 1;
+       path->reada = READA_FORWARD;
 
        cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
        if (btrfs_test_opt(root, SPACE_CACHE) &&
@@ -9877,6 +9947,8 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
                                               key.objectid, key.offset);
                if (ret)
                        btrfs_abort_transaction(trans, extent_root, ret);
+               add_block_group_free_space(trans, root->fs_info, block_group);
+               /* already aborted the transaction if it failed. */
 next:
                list_del_init(&block_group->bg_list);
        }
@@ -9907,6 +9979,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
        cache->flags = type;
        cache->last_byte_to_unpin = (u64)-1;
        cache->cached = BTRFS_CACHE_FINISHED;
+       cache->needs_free_space = 1;
        ret = exclude_super_stripes(root, cache);
        if (ret) {
                /*
@@ -10277,6 +10350,10 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 
        unlock_chunks(root);
 
+       ret = remove_block_group_free_space(trans, root->fs_info, block_group);
+       if (ret)
+               goto out;
+
        btrfs_put_block_group(block_group);
        btrfs_put_block_group(block_group);
 
@@ -10325,7 +10402,7 @@ btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info,
         * more device items and remove one chunk item), but this is done at
         * btrfs_remove_chunk() through a call to check_system_chunk().
         */
-       map = (struct map_lookup *)em->bdev;
+       map = em->map_lookup;
        num_items = 3 + map->num_stripes;
        free_extent_map(em);
 
@@ -10512,7 +10589,7 @@ int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
 
        disk_super = fs_info->super_copy;
        if (!btrfs_super_root(disk_super))
-               return 1;
+               return -EINVAL;
 
        features = btrfs_super_incompat_flags(disk_super);
        if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
@@ -10742,3 +10819,23 @@ int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
        }
        return 1;
 }
+
+static int wait_snapshoting_atomic_t(atomic_t *a)
+{
+       schedule();
+       return 0;
+}
+
+void btrfs_wait_for_snapshot_creation(struct btrfs_root *root)
+{
+       while (true) {
+               int ret;
+
+               ret = btrfs_start_write_no_snapshoting(root);
+               if (ret)
+                       break;
+               wait_on_atomic_t(&root->will_be_snapshoted,
+                                wait_snapshoting_atomic_t,
+                                TASK_UNINTERRUPTIBLE);
+       }
+}
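
The helper pair at the end relies on the wait_on_atomic_t() contract of
kernels from this era: sleep on a wait-bit queue until the atomic_t reaches
zero, with the supplied action callback performing the actual wait. A
comment-form summary of the assumed semantics:

/*
 * wait_on_atomic_t(a, action, mode):
 *   - sleeps until atomic_read(a) == 0;
 *   - 'action' does the waiting (here just schedule()); returning 0 keeps
 *     waiting, a nonzero return aborts and is propagated to the caller.
 *
 * So btrfs_wait_for_snapshot_creation() keeps retrying
 * btrfs_start_write_no_snapshoting() and, whenever a snapshot is pending,
 * sleeps uninterruptibly until will_be_snapshoted drops to zero.
 */
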