btrfs: qgroup: Use new metadata reservation.
[cascardo/linux.git] fs/btrfs/extent-tree.c
index 9f96042..130df70 100644
@@ -2345,6 +2345,11 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
                                                      node->num_bytes);
                        }
                }
+
+               /* Also free its reserved qgroup space */
+               btrfs_qgroup_free_delayed_ref(root->fs_info,
+                                             head->qgroup_ref_root,
+                                             head->qgroup_reserved);
                return ret;
        }
 
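The hunk above pairs the qgroup reservation taken when the delayed ref was queued with a free once the head is finally processed. Below is a user-space sketch of that bookkeeping pattern; the struct and qgroup_free() are hypothetical stand-ins for the head fields and btrfs_qgroup_free_delayed_ref(), not kernel code.

/*
 * Illustrative sketch only, not kernel code: the fields mirror
 * head->qgroup_ref_root and head->qgroup_reserved from the hunk above,
 * and qgroup_free() stands in for btrfs_qgroup_free_delayed_ref().
 */
#include <stdint.h>
#include <stdio.h>

struct ref_head {
	uint64_t qgroup_ref_root;	/* root that made the reservation */
	uint64_t qgroup_reserved;	/* bytes reserved when the ref was queued */
};

static void qgroup_free(uint64_t ref_root, uint64_t bytes)
{
	printf("free %llu bytes back to qgroup of root %llu\n",
	       (unsigned long long)bytes, (unsigned long long)ref_root);
}

static int run_head(struct ref_head *head)
{
	/* ... the delayed ref head is processed here ... */

	/* Pair the queue-time reservation with exactly one free on the
	 * path that consumes the head, as the hunk above does. */
	qgroup_free(head->qgroup_ref_root, head->qgroup_reserved);
	return 0;
}

int main(void)
{
	struct ref_head head = { .qgroup_ref_root = 5, .qgroup_reserved = 16384 };

	return run_head(&head);
}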
@@ -2828,6 +2833,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
        struct btrfs_delayed_ref_head *head;
        int ret;
        int run_all = count == (unsigned long)-1;
+       bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
 
        /* We'll clean this up in btrfs_cleanup_transaction */
        if (trans->aborted)
@@ -2844,6 +2850,7 @@ again:
 #ifdef SCRAMBLE_DELAYED_REFS
        delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
 #endif
+       trans->can_flush_pending_bgs = false;
        ret = __btrfs_run_delayed_refs(trans, root, count);
        if (ret < 0) {
                btrfs_abort_transaction(trans, root, ret);
@@ -2893,6 +2900,7 @@ again:
        }
 out:
        assert_qgroups_uptodate(trans);
+       trans->can_flush_pending_bgs = can_flush_pending_bgs;
        return 0;
 }
 
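The can_flush_pending_bgs handling above is a save/clear/restore guard: the caller's value is parked in a local, the flag is cleared while __btrfs_run_delayed_refs() runs so the early block-group flush gated on it (see the later chunk_bytes_reserved hunk) is skipped, and the original value is put back on the way out. A minimal stand-alone sketch of the same guard, with a hypothetical trans struct rather than the btrfs one:

/* Sketch of the save/clear/restore guard; hypothetical struct, not btrfs code. */
#include <stdbool.h>
#include <stdio.h>

struct trans { bool can_flush_pending_bgs; };

static void run_delayed_refs(struct trans *trans)
{
	bool saved = trans->can_flush_pending_bgs;	/* save */

	trans->can_flush_pending_bgs = false;		/* no flushing while refs run */
	/* ... the __btrfs_run_delayed_refs() equivalent would run here ... */

	trans->can_flush_pending_bgs = saved;		/* restore caller's setting */
}

int main(void)
{
	struct trans t = { .can_flush_pending_bgs = true };

	run_delayed_refs(&t);
	printf("restored: %d\n", t.can_flush_pending_bgs);	/* 1 */
	return 0;
}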
@@ -3819,7 +3827,8 @@ static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
 {
        u64 num_devices = root->fs_info->fs_devices->rw_devices;
        u64 target;
-       u64 tmp;
+       u64 raid_type;
+       u64 allowed = 0;
 
        /*
         * see if restripe for this chunk_type is in progress, if so
@@ -3837,31 +3846,26 @@ static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
        spin_unlock(&root->fs_info->balance_lock);
 
        /* First, mask out the RAID levels which aren't possible */
-       if (num_devices == 1)
-               flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
-                          BTRFS_BLOCK_GROUP_RAID5);
-       if (num_devices < 3)
-               flags &= ~BTRFS_BLOCK_GROUP_RAID6;
-       if (num_devices < 4)
-               flags &= ~BTRFS_BLOCK_GROUP_RAID10;
-
-       tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
-                      BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
-                      BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
-       flags &= ~tmp;
-
-       if (tmp & BTRFS_BLOCK_GROUP_RAID6)
-               tmp = BTRFS_BLOCK_GROUP_RAID6;
-       else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
-               tmp = BTRFS_BLOCK_GROUP_RAID5;
-       else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
-               tmp = BTRFS_BLOCK_GROUP_RAID10;
-       else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
-               tmp = BTRFS_BLOCK_GROUP_RAID1;
-       else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
-               tmp = BTRFS_BLOCK_GROUP_RAID0;
-
-       return extended_to_chunk(flags | tmp);
+       for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
+               if (num_devices >= btrfs_raid_array[raid_type].devs_min)
+                       allowed |= btrfs_raid_group[raid_type];
+       }
+       allowed &= flags;
+
+       if (allowed & BTRFS_BLOCK_GROUP_RAID6)
+               allowed = BTRFS_BLOCK_GROUP_RAID6;
+       else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
+               allowed = BTRFS_BLOCK_GROUP_RAID5;
+       else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
+               allowed = BTRFS_BLOCK_GROUP_RAID10;
+       else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
+               allowed = BTRFS_BLOCK_GROUP_RAID1;
+       else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
+               allowed = BTRFS_BLOCK_GROUP_RAID0;
+
+       flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
+
+       return extended_to_chunk(flags | allowed);
 }
 
 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
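The rewritten btrfs_reduce_alloc_profile() above drops the hand-written device-count masking in favour of a loop over the per-RAID-level table (btrfs_raid_array[].devs_min), then keeps a single profile bit chosen in the same priority order as before. Below is a simplified stand-alone sketch of that selection logic; the enum, bit values, and minimum-device numbers are illustrative only, the real ones live in the kernel's RAID tables.

/* Sketch only: simplified profile-reduction logic with made-up table values. */
#include <stdint.h>
#include <stdio.h>

enum { RAID10, RAID1, RAID0, DUP, RAID5, RAID6, NR_RAID_TYPES };

static const uint64_t raid_group[NR_RAID_TYPES] = {
	[RAID10] = 1 << 0, [RAID1] = 1 << 1, [RAID0] = 1 << 2,
	[DUP]    = 1 << 3, [RAID5] = 1 << 4, [RAID6] = 1 << 5,
};
/* Illustrative minimum device counts, standing in for btrfs_raid_array[].devs_min. */
static const int devs_min[NR_RAID_TYPES] = {
	[RAID10] = 4, [RAID1] = 2, [RAID0] = 2, [DUP] = 1, [RAID5] = 2, [RAID6] = 3,
};

static uint64_t reduce_profile(uint64_t flags, uint64_t num_devices)
{
	uint64_t allowed = 0;
	int i;

	/* Mask out the RAID levels the device count cannot support. */
	for (i = 0; i < NR_RAID_TYPES; i++)
		if (num_devices >= devs_min[i])
			allowed |= raid_group[i];
	allowed &= flags;

	/* Keep one profile bit, in the same priority order as the hunk above. */
	if (allowed & raid_group[RAID6])
		allowed = raid_group[RAID6];
	else if (allowed & raid_group[RAID5])
		allowed = raid_group[RAID5];
	else if (allowed & raid_group[RAID10])
		allowed = raid_group[RAID10];
	else if (allowed & raid_group[RAID1])
		allowed = raid_group[RAID1];
	else if (allowed & raid_group[RAID0])
		allowed = raid_group[RAID0];

	return allowed;
}

int main(void)
{
	/* Ask for RAID6|RAID1 with only 2 devices: RAID6 is masked out, RAID1 wins. */
	printf("0x%llx\n", (unsigned long long)reduce_profile(
		raid_group[RAID6] | raid_group[RAID1], 2));	/* prints 0x2 */
	return 0;
}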
@@ -4306,7 +4310,8 @@ out:
         * the block groups that were made dirty during the lifetime of the
         * transaction.
         */
-       if (trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
+       if (trans->can_flush_pending_bgs &&
+           trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
                btrfs_create_pending_block_groups(trans, trans->root);
                btrfs_trans_release_chunk_metadata(trans);
        }
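The condition above only triggers the early flush when the transaction is allowed to flush pending block groups and at least 2 MiB of chunk metadata is reserved. The same gate as a tiny stand-alone helper, with hypothetical parameter names:

/* Sketch: the gate added above, with hypothetical parameter names. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool should_flush_pending_bgs(bool can_flush, uint64_t chunk_bytes_reserved)
{
	/* 2 MiB threshold, matching the 2 * 1024 * 1024ull constant in the hunk. */
	return can_flush && chunk_bytes_reserved >= 2ULL * 1024 * 1024;
}

int main(void)
{
	printf("%d\n", should_flush_pending_bgs(true, 3ULL << 20));	/* 1: allowed, over threshold */
	printf("%d\n", should_flush_pending_bgs(false, 3ULL << 20));	/* 0: flushing forbidden */
	return 0;
}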
@@ -4887,13 +4892,9 @@ static struct btrfs_block_rsv *get_block_rsv(
 {
        struct btrfs_block_rsv *block_rsv = NULL;
 
-       if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
-               block_rsv = trans->block_rsv;
-
-       if (root == root->fs_info->csum_root && trans->adding_csums)
-               block_rsv = trans->block_rsv;
-
-       if (root == root->fs_info->uuid_root)
+       if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
+           (root == root->fs_info->csum_root && trans->adding_csums) ||
+            (root == root->fs_info->uuid_root))
                block_rsv = trans->block_rsv;
 
        if (!block_rsv)
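The get_block_rsv() change is purely structural: three separate if statements that all assigned trans->block_rsv are folded into one OR'd condition, so behaviour is unchanged. A condensed sketch of that shape, with hypothetical flags (the real fallback after the shown if (!block_rsv) sits outside this hunk):

/* Sketch: folding three "assign the same value" branches into one OR'd test.
 * The flags and strings are hypothetical, not the btrfs structures. */
#include <stdbool.h>
#include <stdio.h>

static const char *pick_rsv(bool ref_cows, bool adding_csums_on_csum_root,
			    bool is_uuid_root)
{
	const char *rsv = NULL;

	/* Equivalent to the three separate ifs the hunk removes. */
	if (ref_cows || adding_csums_on_csum_root || is_uuid_root)
		rsv = "trans->block_rsv";

	if (!rsv)
		rsv = "fallback rsv";	/* actual fallback not shown in the hunk */
	return rsv;
}

int main(void)
{
	printf("%s\n", pick_rsv(false, true, false));	/* trans->block_rsv */
	printf("%s\n", pick_rsv(false, false, false));	/* fallback rsv */
	return 0;
}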
@@ -5336,7 +5337,7 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
        if (root->fs_info->quota_enabled) {
                /* One for parent inode, two for dir entries */
                num_bytes = 3 * root->nodesize;
-               ret = btrfs_qgroup_reserve(root, num_bytes);
+               ret = btrfs_qgroup_reserve_meta(root, num_bytes);
                if (ret)
                        return ret;
        } else {
@@ -5354,10 +5355,8 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
        if (ret == -ENOSPC && use_global_rsv)
                ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
 
-       if (ret) {
-               if (*qgroup_reserved)
-                       btrfs_qgroup_free(root, *qgroup_reserved);
-       }
+       if (ret && *qgroup_reserved)
+               btrfs_qgroup_free_meta(root, *qgroup_reserved);
 
        return ret;
 }
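With quotas enabled, btrfs_subvolume_reserve_metadata() now reserves qgroup metadata for three items (one for the parent inode, two for directory entries) through the new btrfs_qgroup_reserve_meta(), and undoes it with btrfs_qgroup_free_meta() if the later block reservation fails. A user-space sketch of that reserve/try/rollback shape, using stub functions rather than the btrfs API:

/* Sketch of the reserve -> try -> roll-back-on-failure shape; stub functions,
 * not the btrfs qgroup API. */
#include <stdint.h>
#include <stdio.h>

static int  qgroup_reserve_meta(uint64_t bytes) { printf("qgroup reserve %llu\n", (unsigned long long)bytes); return 0; }
static void qgroup_free_meta(uint64_t bytes)    { printf("qgroup free %llu\n", (unsigned long long)bytes); }
static int  block_rsv_add(uint64_t bytes)       { (void)bytes; return -28; /* pretend ENOSPC */ }

static int subvolume_reserve(uint64_t nodesize, int quota_enabled)
{
	uint64_t qgroup_reserved = 0;
	int ret;

	if (quota_enabled) {
		/* One for the parent inode, two for dir entries, as in the hunk. */
		uint64_t num_bytes = 3 * nodesize;

		ret = qgroup_reserve_meta(num_bytes);
		if (ret)
			return ret;
		qgroup_reserved = num_bytes;
	}

	ret = block_rsv_add(3 * nodesize);
	if (ret && qgroup_reserved)
		qgroup_free_meta(qgroup_reserved);	/* roll back only the qgroup part */
	return ret;
}

int main(void)
{
	int ret = subvolume_reserve(16384, 1);

	printf("ret=%d\n", ret);
	return 0;
}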
@@ -5518,15 +5517,15 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
        spin_unlock(&BTRFS_I(inode)->lock);
 
        if (root->fs_info->quota_enabled) {
-               ret = btrfs_qgroup_reserve(root, nr_extents * root->nodesize);
+               ret = btrfs_qgroup_reserve_meta(root,
+                               nr_extents * root->nodesize);
                if (ret)
                        goto out_fail;
        }
 
        ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
        if (unlikely(ret)) {
-               if (root->fs_info->quota_enabled)
-                       btrfs_qgroup_free(root, nr_extents * root->nodesize);
+               btrfs_qgroup_free_meta(root, nr_extents * root->nodesize);
                goto out_fail;
        }
 
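The delalloc path follows the same scheme: reserve nr_extents * nodesize of qgroup metadata up front, and on failure of reserve_metadata_bytes() release it with a single btrfs_qgroup_free_meta() call, with no quota_enabled test left in the error path (which suggests the new helper tolerates being called when nothing was reserved). A compact sketch of that simplified error path, with stubbed helpers and a plain counter standing in for the accounting:

/* Sketch of the simplified error path: one unconditional cleanup call,
 * assuming the free helper is harmless when nothing was reserved.
 * Stub functions and counters only, not btrfs APIs. */
#include <stdint.h>
#include <stdio.h>

static uint64_t meta_reserved;	/* stands in for per-root qgroup meta accounting */

static int  qgroup_reserve_meta(uint64_t bytes) { meta_reserved += bytes; return 0; }
static void qgroup_free_meta(uint64_t bytes)
{
	if (bytes > meta_reserved)	/* tolerate callers that reserved nothing */
		bytes = meta_reserved;
	meta_reserved -= bytes;
}
static int reserve_metadata_bytes(uint64_t bytes) { (void)bytes; return -28; /* pretend ENOSPC */ }

static int delalloc_reserve(uint64_t nr_extents, uint64_t nodesize, int quota_enabled)
{
	int ret;

	if (quota_enabled) {
		ret = qgroup_reserve_meta(nr_extents * nodesize);
		if (ret)
			return ret;
	}

	ret = reserve_metadata_bytes(nr_extents * nodesize);
	if (ret)
		qgroup_free_meta(nr_extents * nodesize);	/* no quota check needed here */
	return ret;
}

int main(void)
{
	int ret = delalloc_reserve(2, 16384, 1);

	printf("ret=%d reserved=%llu\n", ret, (unsigned long long)meta_reserved);
	return 0;
}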
@@ -8271,10 +8270,11 @@ skip:
                        ret = account_shared_subtree(trans, root, next,
                                                     generation, level - 1);
                        if (ret) {
-                               printk_ratelimited(KERN_ERR "BTRFS: %s Error "
+                               btrfs_err_rl(root->fs_info,
+                                       "Error "
                                        "%d accounting shared subtree. Quota "
-                                       "is out of sync, rescan required.\n",
-                                       root->fs_info->sb->s_id, ret);
+                                       "is out of sync, rescan required.",
+                                       ret);
                        }
                }
                ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
@@ -8363,10 +8363,11 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
                        BUG_ON(ret); /* -ENOMEM */
                        ret = account_leaf_items(trans, root, eb);
                        if (ret) {
-                               printk_ratelimited(KERN_ERR "BTRFS: %s Error "
+                               btrfs_err_rl(root->fs_info,
+                                       "error "
                                        "%d accounting leaf items. Quota "
-                                       "is out of sync, rescan required.\n",
-                                       root->fs_info->sb->s_id, ret);
+                                       "is out of sync, rescan required.",
+                                       ret);
                        }
                }
                /* make block locked assertion in clean_tree_block happy */
@@ -8688,7 +8689,7 @@ out:
        if (!for_reloc && root_dropped == false)
                btrfs_add_dead_root(root);
        if (err && err != -EAGAIN)
-               btrfs_std_error(root->fs_info, err);
+               btrfs_std_error(root->fs_info, err, NULL);
        return err;
 }
 
@@ -9560,7 +9561,9 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
        struct btrfs_block_group_item item;
        struct btrfs_key key;
        int ret = 0;
+       bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
 
+       trans->can_flush_pending_bgs = false;
        list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
                if (ret)
                        goto next;
@@ -9581,6 +9584,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
 next:
                list_del_init(&block_group->bg_list);
        }
+       trans->can_flush_pending_bgs = can_flush_pending_bgs;
 }
 
 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
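In btrfs_create_pending_block_groups() the flag is cleared around the list walk itself: work done while inserting the new block-group items could otherwise ask to flush pending block groups again mid-loop. The sketch below folds the caller-side gate and the clear/restore from the hunks above into one function, purely to show the re-entrancy guard in isolation; all names are hypothetical.

/* Sketch: a boolean guard preventing re-entry while a list is being drained. */
#include <stdbool.h>
#include <stdio.h>

static bool can_flush = true;
static int  pending   = 3;

static void flush_pending(void);

static void do_work_that_may_flush(void)
{
	/* Something inside the loop may ask to flush again; the guard makes
	 * the nested request a no-op instead of recursing. */
	flush_pending();
}

static void flush_pending(void)
{
	bool saved;

	if (!can_flush)
		return;			/* re-entry blocked */

	saved = can_flush;		/* save, mirroring the hunk */
	can_flush = false;		/* clear for the duration of the loop */
	while (pending > 0) {
		pending--;
		do_work_that_may_flush();
	}
	can_flush = saved;		/* restore */
}

int main(void)
{
	flush_pending();
	printf("pending=%d\n", pending);	/* 0 */
	return 0;
}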
@@ -10363,8 +10367,7 @@ void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
 {
        percpu_counter_dec(&root->subv_writers->counter);
        /*
-        * Make sure counter is updated before we wake up
-        * waiters.
+        * Make sure counter is updated before we wake up waiters.
         */
        smp_mb();
        if (waitqueue_active(&root->subv_writers->wait))