2 * Copyright (C) 2011 STRATO. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/sched.h>
20 #include <linux/pagemap.h>
21 #include <linux/writeback.h>
22 #include <linux/blkdev.h>
23 #include <linux/rbtree.h>
24 #include <linux/slab.h>
25 #include <linux/workqueue.h>
26 #include <linux/btrfs.h>
29 #include "transaction.h"
34 #include "extent_io.h"
38 * - subvol delete -> delete when ref goes to 0? delete limits also?
42 * - copy also limits on subvol creation
44 * - caches for ulists
45 * - performance benchmarks
46 * - check all ioctl parameters
50 * one struct for each qgroup, organized in fs_info->qgroup_tree.
58 u64 rfer; /* referenced */
59 u64 rfer_cmpr; /* referenced compressed */
60 u64 excl; /* exclusive */
61 u64 excl_cmpr; /* exclusive compressed */
66 u64 lim_flags; /* which limits are set */
73 * reservation tracking
80 struct list_head groups; /* groups this group is member of */
81 struct list_head members; /* groups that are members of this group */
82 struct list_head dirty; /* dirty groups */
83 struct rb_node node; /* tree of qgroups */
86 * temp variables for accounting operations
93 * glue structure to represent the relations between qgroups.
95 struct btrfs_qgroup_list {
96 struct list_head next_group;
97 struct list_head next_member;
98 struct btrfs_qgroup *group;
99 struct btrfs_qgroup *member;
102 #define ptr_to_u64(x) ((u64)(uintptr_t)x)
103 #define u64_to_ptr(x) ((struct btrfs_qgroup *)(uintptr_t)x)
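/*
 * The two helpers above stash struct btrfs_qgroup pointers in the u64
 * aux field of ulist nodes and fetch them back; this is how the
 * accounting walks below carry qgroup pointers through a ulist.
 */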
106 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
108 static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
110 /* must be called with qgroup_ioctl_lock held */
111 static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
114 struct rb_node *n = fs_info->qgroup_tree.rb_node;
115 struct btrfs_qgroup *qgroup;
118 qgroup = rb_entry(n, struct btrfs_qgroup, node);
119 if (qgroup->qgroupid < qgroupid)
121 else if (qgroup->qgroupid > qgroupid)
129 /* must be called with qgroup_lock held */
130 static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
133 struct rb_node **p = &fs_info->qgroup_tree.rb_node;
134 struct rb_node *parent = NULL;
135 struct btrfs_qgroup *qgroup;
139 qgroup = rb_entry(parent, struct btrfs_qgroup, node);
141 if (qgroup->qgroupid < qgroupid)
143 else if (qgroup->qgroupid > qgroupid)
149 qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
151 return ERR_PTR(-ENOMEM);
153 qgroup->qgroupid = qgroupid;
154 INIT_LIST_HEAD(&qgroup->groups);
155 INIT_LIST_HEAD(&qgroup->members);
156 INIT_LIST_HEAD(&qgroup->dirty);
158 rb_link_node(&qgroup->node, parent, p);
159 rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);
164 static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
166 struct btrfs_qgroup_list *list;
168 list_del(&qgroup->dirty);
169 while (!list_empty(&qgroup->groups)) {
170 list = list_first_entry(&qgroup->groups,
171 struct btrfs_qgroup_list, next_group);
172 list_del(&list->next_group);
173 list_del(&list->next_member);
177 while (!list_empty(&qgroup->members)) {
178 list = list_first_entry(&qgroup->members,
179 struct btrfs_qgroup_list, next_member);
180 list_del(&list->next_group);
181 list_del(&list->next_member);
187 /* must be called with qgroup_lock held */
188 static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
190 struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);
195 rb_erase(&qgroup->node, &fs_info->qgroup_tree);
196 __del_qgroup_rb(qgroup);
200 /* must be called with qgroup_lock held */
201 static int add_relation_rb(struct btrfs_fs_info *fs_info,
202 u64 memberid, u64 parentid)
204 struct btrfs_qgroup *member;
205 struct btrfs_qgroup *parent;
206 struct btrfs_qgroup_list *list;
208 member = find_qgroup_rb(fs_info, memberid);
209 parent = find_qgroup_rb(fs_info, parentid);
210 if (!member || !parent)
213 list = kzalloc(sizeof(*list), GFP_ATOMIC);
217 list->group = parent;
218 list->member = member;
219 list_add_tail(&list->next_group, &member->groups);
220 list_add_tail(&list->next_member, &parent->members);
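/*
 * A single btrfs_qgroup_list node represents the edge in both
 * directions: it now sits on the member's ->groups list and on the
 * parent's ->members list at the same time.
 */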
225 /* must be called with qgroup_lock held */
226 static int del_relation_rb(struct btrfs_fs_info *fs_info,
227 u64 memberid, u64 parentid)
229 struct btrfs_qgroup *member;
230 struct btrfs_qgroup *parent;
231 struct btrfs_qgroup_list *list;
233 member = find_qgroup_rb(fs_info, memberid);
234 parent = find_qgroup_rb(fs_info, parentid);
235 if (!member || !parent)
238 list_for_each_entry(list, &member->groups, next_group) {
239 if (list->group == parent) {
240 list_del(&list->next_group);
241 list_del(&list->next_member);
249 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
250 int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
253 struct btrfs_qgroup *qgroup;
255 qgroup = find_qgroup_rb(fs_info, qgroupid);
258 if (qgroup->rfer != rfer || qgroup->excl != excl)
265 * The full config is read in one go, only called from open_ctree().
266 * It doesn't use any locking, as at this point we're still single-threaded
268 int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
270 struct btrfs_key key;
271 struct btrfs_key found_key;
272 struct btrfs_root *quota_root = fs_info->quota_root;
273 struct btrfs_path *path = NULL;
274 struct extent_buffer *l;
278 u64 rescan_progress = 0;
280 if (!fs_info->quota_enabled)
283 fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
284 if (!fs_info->qgroup_ulist) {
289 path = btrfs_alloc_path();
295 /* default this to quota off, in case no status key is found */
296 fs_info->qgroup_flags = 0;
299 * pass 1: read status, all qgroup infos and limits
304 ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
309 struct btrfs_qgroup *qgroup;
311 slot = path->slots[0];
313 btrfs_item_key_to_cpu(l, &found_key, slot);
315 if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
316 struct btrfs_qgroup_status_item *ptr;
318 ptr = btrfs_item_ptr(l, slot,
319 struct btrfs_qgroup_status_item);
321 if (btrfs_qgroup_status_version(l, ptr) !=
322 BTRFS_QGROUP_STATUS_VERSION) {
324 "old qgroup version, quota disabled");
327 if (btrfs_qgroup_status_generation(l, ptr) !=
328 fs_info->generation) {
329 flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
331 "qgroup generation mismatch, "
332 "marked as inconsistent");
334 fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
336 rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
340 if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
341 found_key.type != BTRFS_QGROUP_LIMIT_KEY)
344 qgroup = find_qgroup_rb(fs_info, found_key.offset);
345 if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
346 (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
347 btrfs_err(fs_info, "inconsistent qgroup config");
348 flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
351 qgroup = add_qgroup_rb(fs_info, found_key.offset);
352 if (IS_ERR(qgroup)) {
353 ret = PTR_ERR(qgroup);
357 switch (found_key.type) {
358 case BTRFS_QGROUP_INFO_KEY: {
359 struct btrfs_qgroup_info_item *ptr;
361 ptr = btrfs_item_ptr(l, slot,
362 struct btrfs_qgroup_info_item);
363 qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
364 qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
365 qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
366 qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
367 /* generation currently unused */
370 case BTRFS_QGROUP_LIMIT_KEY: {
371 struct btrfs_qgroup_limit_item *ptr;
373 ptr = btrfs_item_ptr(l, slot,
374 struct btrfs_qgroup_limit_item);
375 qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
376 qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
377 qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
378 qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
379 qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
384 ret = btrfs_next_item(quota_root, path);
390 btrfs_release_path(path);
393 * pass 2: read all qgroup relations
396 key.type = BTRFS_QGROUP_RELATION_KEY;
398 ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
402 slot = path->slots[0];
404 btrfs_item_key_to_cpu(l, &found_key, slot);
406 if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
409 if (found_key.objectid > found_key.offset) {
410 /* parent <- member, not needed to build config */
411 /* FIXME should we omit the key completely? */
415 ret = add_relation_rb(fs_info, found_key.objectid,
417 if (ret == -ENOENT) {
419 "orphan qgroup relation 0x%llx->0x%llx",
420 found_key.objectid, found_key.offset);
421 ret = 0; /* ignore the error */
426 ret = btrfs_next_item(quota_root, path);
433 fs_info->qgroup_flags |= flags;
434 if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) {
435 fs_info->quota_enabled = 0;
436 fs_info->pending_quota_state = 0;
437 } else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
439 ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
441 btrfs_free_path(path);
444 ulist_free(fs_info->qgroup_ulist);
445 fs_info->qgroup_ulist = NULL;
446 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
449 return ret < 0 ? ret : 0;
453 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable();
454 * the first two are single-threaded paths. For the third one, we have already
455 * set quota_root to NULL with qgroup_lock held, so it is safe to clean
456 * up the in-memory structures without holding qgroup_lock.
458 void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
461 struct btrfs_qgroup *qgroup;
463 while ((n = rb_first(&fs_info->qgroup_tree))) {
464 qgroup = rb_entry(n, struct btrfs_qgroup, node);
465 rb_erase(n, &fs_info->qgroup_tree);
466 __del_qgroup_rb(qgroup);
469 * we call btrfs_free_qgroup_config() when unmounting the
470 * filesystem and when disabling quota, so we set qgroup_ulist
471 * to NULL here to avoid a double free.
473 ulist_free(fs_info->qgroup_ulist);
474 fs_info->qgroup_ulist = NULL;
477 static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
478 struct btrfs_root *quota_root,
482 struct btrfs_path *path;
483 struct btrfs_key key;
485 path = btrfs_alloc_path();
490 key.type = BTRFS_QGROUP_RELATION_KEY;
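/*
 * A relation item has no payload; the (src, dst) pair is encoded
 * entirely in the key, hence the zero-sized item below.
 */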
493 ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
495 btrfs_mark_buffer_dirty(path->nodes[0]);
497 btrfs_free_path(path);
501 static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
502 struct btrfs_root *quota_root,
506 struct btrfs_path *path;
507 struct btrfs_key key;
509 path = btrfs_alloc_path();
514 key.type = BTRFS_QGROUP_RELATION_KEY;
517 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
526 ret = btrfs_del_item(trans, quota_root, path);
528 btrfs_free_path(path);
532 static int add_qgroup_item(struct btrfs_trans_handle *trans,
533 struct btrfs_root *quota_root, u64 qgroupid)
536 struct btrfs_path *path;
537 struct btrfs_qgroup_info_item *qgroup_info;
538 struct btrfs_qgroup_limit_item *qgroup_limit;
539 struct extent_buffer *leaf;
540 struct btrfs_key key;
542 if (btrfs_test_is_dummy_root(quota_root))
545 path = btrfs_alloc_path();
550 key.type = BTRFS_QGROUP_INFO_KEY;
551 key.offset = qgroupid;
554 * Avoid a transaction abort by catching -EEXIST here. In that
555 * case, we proceed by re-initializing the existing structure
559 ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
560 sizeof(*qgroup_info));
561 if (ret && ret != -EEXIST)
564 leaf = path->nodes[0];
565 qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
566 struct btrfs_qgroup_info_item);
567 btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
568 btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
569 btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
570 btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
571 btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
573 btrfs_mark_buffer_dirty(leaf);
575 btrfs_release_path(path);
577 key.type = BTRFS_QGROUP_LIMIT_KEY;
578 ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
579 sizeof(*qgroup_limit));
580 if (ret && ret != -EEXIST)
583 leaf = path->nodes[0];
584 qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
585 struct btrfs_qgroup_limit_item);
586 btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
587 btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
588 btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
589 btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
590 btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
592 btrfs_mark_buffer_dirty(leaf);
596 btrfs_free_path(path);
600 static int del_qgroup_item(struct btrfs_trans_handle *trans,
601 struct btrfs_root *quota_root, u64 qgroupid)
604 struct btrfs_path *path;
605 struct btrfs_key key;
607 path = btrfs_alloc_path();
612 key.type = BTRFS_QGROUP_INFO_KEY;
613 key.offset = qgroupid;
614 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
623 ret = btrfs_del_item(trans, quota_root, path);
627 btrfs_release_path(path);
629 key.type = BTRFS_QGROUP_LIMIT_KEY;
630 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
639 ret = btrfs_del_item(trans, quota_root, path);
642 btrfs_free_path(path);
646 static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
647 struct btrfs_root *root,
648 struct btrfs_qgroup *qgroup)
650 struct btrfs_path *path;
651 struct btrfs_key key;
652 struct extent_buffer *l;
653 struct btrfs_qgroup_limit_item *qgroup_limit;
658 key.type = BTRFS_QGROUP_LIMIT_KEY;
659 key.offset = qgroup->qgroupid;
661 path = btrfs_alloc_path();
665 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
673 slot = path->slots[0];
674 qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
675 btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
676 btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
677 btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
678 btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
679 btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);
681 btrfs_mark_buffer_dirty(l);
684 btrfs_free_path(path);
688 static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
689 struct btrfs_root *root,
690 struct btrfs_qgroup *qgroup)
692 struct btrfs_path *path;
693 struct btrfs_key key;
694 struct extent_buffer *l;
695 struct btrfs_qgroup_info_item *qgroup_info;
699 if (btrfs_test_is_dummy_root(root))
703 key.type = BTRFS_QGROUP_INFO_KEY;
704 key.offset = qgroup->qgroupid;
706 path = btrfs_alloc_path();
710 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
718 slot = path->slots[0];
719 qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
720 btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
721 btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
722 btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
723 btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
724 btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
726 btrfs_mark_buffer_dirty(l);
729 btrfs_free_path(path);
733 static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
734 struct btrfs_fs_info *fs_info,
735 struct btrfs_root *root)
737 struct btrfs_path *path;
738 struct btrfs_key key;
739 struct extent_buffer *l;
740 struct btrfs_qgroup_status_item *ptr;
745 key.type = BTRFS_QGROUP_STATUS_KEY;
748 path = btrfs_alloc_path();
752 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
760 slot = path->slots[0];
761 ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
762 btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
763 btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
764 btrfs_set_qgroup_status_rescan(l, ptr,
765 fs_info->qgroup_rescan_progress.objectid);
767 btrfs_mark_buffer_dirty(l);
770 btrfs_free_path(path);
775 * called with qgroup_lock held
777 static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
778 struct btrfs_root *root)
780 struct btrfs_path *path;
781 struct btrfs_key key;
782 struct extent_buffer *leaf = NULL;
786 path = btrfs_alloc_path();
790 path->leave_spinning = 1;
797 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
800 leaf = path->nodes[0];
801 nr = btrfs_header_nritems(leaf);
805 * delete the leaves one by one
806 * since the whole tree is going
810 ret = btrfs_del_items(trans, root, path, 0, nr);
814 btrfs_release_path(path);
818 root->fs_info->pending_quota_state = 0;
819 btrfs_free_path(path);
823 int btrfs_quota_enable(struct btrfs_trans_handle *trans,
824 struct btrfs_fs_info *fs_info)
826 struct btrfs_root *quota_root;
827 struct btrfs_root *tree_root = fs_info->tree_root;
828 struct btrfs_path *path = NULL;
829 struct btrfs_qgroup_status_item *ptr;
830 struct extent_buffer *leaf;
831 struct btrfs_key key;
832 struct btrfs_key found_key;
833 struct btrfs_qgroup *qgroup = NULL;
837 mutex_lock(&fs_info->qgroup_ioctl_lock);
838 if (fs_info->quota_root) {
839 fs_info->pending_quota_state = 1;
843 fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
844 if (!fs_info->qgroup_ulist) {
850 * initially create the quota tree
852 quota_root = btrfs_create_tree(trans, fs_info,
853 BTRFS_QUOTA_TREE_OBJECTID);
854 if (IS_ERR(quota_root)) {
855 ret = PTR_ERR(quota_root);
859 path = btrfs_alloc_path();
866 key.type = BTRFS_QGROUP_STATUS_KEY;
869 ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
874 leaf = path->nodes[0];
875 ptr = btrfs_item_ptr(leaf, path->slots[0],
876 struct btrfs_qgroup_status_item);
877 btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
878 btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
879 fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
880 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
881 btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
882 btrfs_set_qgroup_status_rescan(leaf, ptr, 0);
884 btrfs_mark_buffer_dirty(leaf);
887 key.type = BTRFS_ROOT_REF_KEY;
890 btrfs_release_path(path);
891 ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
899 slot = path->slots[0];
900 leaf = path->nodes[0];
901 btrfs_item_key_to_cpu(leaf, &found_key, slot);
903 if (found_key.type == BTRFS_ROOT_REF_KEY) {
904 ret = add_qgroup_item(trans, quota_root,
909 qgroup = add_qgroup_rb(fs_info, found_key.offset);
910 if (IS_ERR(qgroup)) {
911 ret = PTR_ERR(qgroup);
915 ret = btrfs_next_item(tree_root, path);
923 btrfs_release_path(path);
924 ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
928 qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
929 if (IS_ERR(qgroup)) {
930 ret = PTR_ERR(qgroup);
933 spin_lock(&fs_info->qgroup_lock);
934 fs_info->quota_root = quota_root;
935 fs_info->pending_quota_state = 1;
936 spin_unlock(&fs_info->qgroup_lock);
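/*
 * quota_enabled itself is only flipped from pending_quota_state at the
 * next commit, in btrfs_run_qgroups().
 */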
938 btrfs_free_path(path);
941 free_extent_buffer(quota_root->node);
942 free_extent_buffer(quota_root->commit_root);
947 ulist_free(fs_info->qgroup_ulist);
948 fs_info->qgroup_ulist = NULL;
950 mutex_unlock(&fs_info->qgroup_ioctl_lock);
954 int btrfs_quota_disable(struct btrfs_trans_handle *trans,
955 struct btrfs_fs_info *fs_info)
957 struct btrfs_root *tree_root = fs_info->tree_root;
958 struct btrfs_root *quota_root;
961 mutex_lock(&fs_info->qgroup_ioctl_lock);
962 if (!fs_info->quota_root)
964 spin_lock(&fs_info->qgroup_lock);
965 fs_info->quota_enabled = 0;
966 fs_info->pending_quota_state = 0;
967 quota_root = fs_info->quota_root;
968 fs_info->quota_root = NULL;
969 spin_unlock(&fs_info->qgroup_lock);
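/*
 * With quota_root cleared under qgroup_lock, the lockless cleanup in
 * btrfs_free_qgroup_config() below is safe (see the comment above that
 * function).
 */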
971 btrfs_free_qgroup_config(fs_info);
973 ret = btrfs_clean_quota_tree(trans, quota_root);
977 ret = btrfs_del_root(trans, tree_root, &quota_root->root_key);
981 list_del(&quota_root->dirty_list);
983 btrfs_tree_lock(quota_root->node);
984 clean_tree_block(trans, tree_root->fs_info, quota_root->node);
985 btrfs_tree_unlock(quota_root->node);
986 btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);
988 free_extent_buffer(quota_root->node);
989 free_extent_buffer(quota_root->commit_root);
992 mutex_unlock(&fs_info->qgroup_ioctl_lock);
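/*
 * Mark a qgroup as dirty so that btrfs_run_qgroups() writes its info item
 * back to disk at commit time; the list_empty() check prevents a qgroup
 * from being queued twice.
 */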
996 static void qgroup_dirty(struct btrfs_fs_info *fs_info,
997 struct btrfs_qgroup *qgroup)
999 if (list_empty(&qgroup->dirty))
1000 list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
1003 int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
1004 struct btrfs_fs_info *fs_info, u64 src, u64 dst)
1006 struct btrfs_root *quota_root;
1007 struct btrfs_qgroup *parent;
1008 struct btrfs_qgroup *member;
1009 struct btrfs_qgroup_list *list;
1012 mutex_lock(&fs_info->qgroup_ioctl_lock);
1013 quota_root = fs_info->quota_root;
1018 member = find_qgroup_rb(fs_info, src);
1019 parent = find_qgroup_rb(fs_info, dst);
1020 if (!member || !parent) {
1025 /* first check whether such a qgroup relation already exists */
1026 list_for_each_entry(list, &member->groups, next_group) {
1027 if (list->group == parent) {
1033 ret = add_qgroup_relation_item(trans, quota_root, src, dst);
1037 ret = add_qgroup_relation_item(trans, quota_root, dst, src);
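/*
 * The relation is stored in both directions; undo the first
 * insertion if the mirrored one fails.
 */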
1039 del_qgroup_relation_item(trans, quota_root, src, dst);
1043 spin_lock(&fs_info->qgroup_lock);
1044 ret = add_relation_rb(quota_root->fs_info, src, dst);
1045 spin_unlock(&fs_info->qgroup_lock);
1047 mutex_unlock(&fs_info->qgroup_ioctl_lock);
1051 int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
1052 struct btrfs_fs_info *fs_info, u64 src, u64 dst)
1054 struct btrfs_root *quota_root;
1055 struct btrfs_qgroup *parent;
1056 struct btrfs_qgroup *member;
1057 struct btrfs_qgroup_list *list;
1061 mutex_lock(&fs_info->qgroup_ioctl_lock);
1062 quota_root = fs_info->quota_root;
1068 member = find_qgroup_rb(fs_info, src);
1069 parent = find_qgroup_rb(fs_info, dst);
1070 if (!member || !parent) {
1075 /* first check whether such a qgroup relation already exists */
1076 list_for_each_entry(list, &member->groups, next_group) {
1077 if (list->group == parent)
1083 ret = del_qgroup_relation_item(trans, quota_root, src, dst);
1084 err = del_qgroup_relation_item(trans, quota_root, dst, src);
1088 spin_lock(&fs_info->qgroup_lock);
1089 del_relation_rb(fs_info, src, dst);
1090 spin_unlock(&fs_info->qgroup_lock);
1092 mutex_unlock(&fs_info->qgroup_ioctl_lock);
1096 int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
1097 struct btrfs_fs_info *fs_info, u64 qgroupid, char *name)
1099 struct btrfs_root *quota_root;
1100 struct btrfs_qgroup *qgroup;
1103 mutex_lock(&fs_info->qgroup_ioctl_lock);
1104 quota_root = fs_info->quota_root;
1109 qgroup = find_qgroup_rb(fs_info, qgroupid);
1115 ret = add_qgroup_item(trans, quota_root, qgroupid);
1119 spin_lock(&fs_info->qgroup_lock);
1120 qgroup = add_qgroup_rb(fs_info, qgroupid);
1121 spin_unlock(&fs_info->qgroup_lock);
1124 ret = PTR_ERR(qgroup);
1126 mutex_unlock(&fs_info->qgroup_ioctl_lock);
1130 int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
1131 struct btrfs_fs_info *fs_info, u64 qgroupid)
1133 struct btrfs_root *quota_root;
1134 struct btrfs_qgroup *qgroup;
1137 mutex_lock(&fs_info->qgroup_ioctl_lock);
1138 quota_root = fs_info->quota_root;
1144 qgroup = find_qgroup_rb(fs_info, qgroupid);
1149 /* check if there are no relations to this qgroup */
1150 if (!list_empty(&qgroup->groups) ||
1151 !list_empty(&qgroup->members)) {
1156 ret = del_qgroup_item(trans, quota_root, qgroupid);
1158 spin_lock(&fs_info->qgroup_lock);
1159 del_qgroup_rb(quota_root->fs_info, qgroupid);
1160 spin_unlock(&fs_info->qgroup_lock);
1162 mutex_unlock(&fs_info->qgroup_ioctl_lock);
1166 int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
1167 struct btrfs_fs_info *fs_info, u64 qgroupid,
1168 struct btrfs_qgroup_limit *limit)
1170 struct btrfs_root *quota_root;
1171 struct btrfs_qgroup *qgroup;
1174 mutex_lock(&fs_info->qgroup_ioctl_lock);
1175 quota_root = fs_info->quota_root;
1181 qgroup = find_qgroup_rb(fs_info, qgroupid);
1187 spin_lock(&fs_info->qgroup_lock);
1188 if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER)
1189 qgroup->max_rfer = limit->max_rfer;
1190 if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL)
1191 qgroup->max_excl = limit->max_excl;
1192 if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER)
1193 qgroup->rsv_rfer = limit->rsv_rfer;
1194 if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL)
1195 qgroup->rsv_excl = limit->rsv_excl;
1196 qgroup->lim_flags |= limit->flags;
1198 spin_unlock(&fs_info->qgroup_lock);
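/* persist the new limits; on failure all we can do is flag inconsistency */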
1200 ret = update_qgroup_limit_item(trans, quota_root, qgroup);
1202 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1203 btrfs_info(fs_info, "unable to update quota limit for %llu",
1208 mutex_unlock(&fs_info->qgroup_ioctl_lock);
1212 static int comp_oper_exist(struct btrfs_qgroup_operation *oper1,
1213 struct btrfs_qgroup_operation *oper2)
1216 * Ignore seq and type here, we're looking for any operation
1217 * at all related to this extent on that root.
1219 if (oper1->bytenr < oper2->bytenr)
1221 if (oper1->bytenr > oper2->bytenr)
1223 if (oper1->ref_root < oper2->ref_root)
1225 if (oper1->ref_root > oper2->ref_root)
1230 static int qgroup_oper_exists(struct btrfs_fs_info *fs_info,
1231 struct btrfs_qgroup_operation *oper)
1234 struct btrfs_qgroup_operation *cur;
1237 spin_lock(&fs_info->qgroup_op_lock);
1238 n = fs_info->qgroup_op_tree.rb_node;
1240 cur = rb_entry(n, struct btrfs_qgroup_operation, n);
1241 cmp = comp_oper_exist(cur, oper);
1247 spin_unlock(&fs_info->qgroup_op_lock);
1251 spin_unlock(&fs_info->qgroup_op_lock);
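/*
 * Unlike comp_oper_exist(), this comparator also orders by seq and type,
 * so every recorded operation gets a unique slot in the rb tree and
 * multiple operations on the same extent can coexist.
 */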
1255 static int comp_oper(struct btrfs_qgroup_operation *oper1,
1256 struct btrfs_qgroup_operation *oper2)
1258 if (oper1->bytenr < oper2->bytenr)
1260 if (oper1->bytenr > oper2->bytenr)
1262 if (oper1->ref_root < oper2->ref_root)
1264 if (oper1->ref_root > oper2->ref_root)
1266 if (oper1->seq < oper2->seq)
1268 if (oper1->seq > oper2->seq)
1270 if (oper1->type < oper2->type)
1272 if (oper1->type > oper2->type)
1277 static int insert_qgroup_oper(struct btrfs_fs_info *fs_info,
1278 struct btrfs_qgroup_operation *oper)
1281 struct rb_node *parent = NULL;
1282 struct btrfs_qgroup_operation *cur;
1285 spin_lock(&fs_info->qgroup_op_lock);
1286 p = &fs_info->qgroup_op_tree.rb_node;
1289 cur = rb_entry(parent, struct btrfs_qgroup_operation, n);
1290 cmp = comp_oper(cur, oper);
1292 p = &(*p)->rb_right;
1296 spin_unlock(&fs_info->qgroup_op_lock);
1300 rb_link_node(&oper->n, parent, p);
1301 rb_insert_color(&oper->n, &fs_info->qgroup_op_tree);
1302 spin_unlock(&fs_info->qgroup_op_lock);
1307 * Record a quota operation for processing later on.
1308 * @trans: the transaction we are adding the delayed op to.
1309 * @fs_info: the fs_info for this fs.
1310 * @ref_root: the root of the reference we are acting on.
1311 * @bytenr: the bytenr we are acting on.
1312 * @num_bytes: the number of bytes in the reference.
1313 * @type: the type of operation this is.
1314 * @mod_seq: do we need to get a sequence number for looking up roots.
1316 * We just add it to our trans qgroup_ref_list and carry on and process these
1317 * operations in order at some later point. If the reference root isn't a fs
1318 * root then we don't bother with doing anything.
1320 * MUST BE HOLDING THE REF LOCK.
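 *
 * A typical call looks like (sketch only, exact call sites vary):
 *
 *	btrfs_qgroup_record_ref(trans, root->fs_info,
 *				root->root_key.objectid, bytenr,
 *				num_bytes, BTRFS_QGROUP_OPER_ADD_EXCL, 0);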
1322 int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
1323 struct btrfs_fs_info *fs_info, u64 ref_root,
1324 u64 bytenr, u64 num_bytes,
1325 enum btrfs_qgroup_operation_type type, int mod_seq)
1327 struct btrfs_qgroup_operation *oper;
1330 if (!is_fstree(ref_root) || !fs_info->quota_enabled)
1333 oper = kmalloc(sizeof(*oper), GFP_NOFS);
1337 oper->ref_root = ref_root;
1338 oper->bytenr = bytenr;
1339 oper->num_bytes = num_bytes;
1341 oper->seq = atomic_inc_return(&fs_info->qgroup_op_seq);
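/*
 * The seq taken above is unique and monotonically increasing;
 * comp_oper() uses it to keep operations on the same extent apart
 * in the rb tree.
 */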
1342 INIT_LIST_HEAD(&oper->elem.list);
1345 trace_btrfs_qgroup_record_ref(oper);
1347 if (type == BTRFS_QGROUP_OPER_SUB_SUBTREE) {
1349 * If any operation for this bytenr/ref_root combo
1350 * exists, then we know it's not exclusively owned and
1351 * shouldn't be queued up.
1353 * This also catches the case where we have a cloned
1354 * extent that gets queued up multiple times during
1357 if (qgroup_oper_exists(fs_info, oper)) {
1363 ret = insert_qgroup_oper(fs_info, oper);
1365 /* Shouldn't happen so have an assert for developers */
1370 list_add_tail(&oper->list, &trans->qgroup_ref_list);
1373 btrfs_get_tree_mod_seq(fs_info, &oper->elem);
1379 * The easy accounting, if we are adding/removing the only ref for an extent
1380 * then this qgroup and all of the parent qgroups get their reference and
1381 * exclusive counts adjusted.
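 *
 * For example, adding the only ref for a 16K extent to a subvolume in
 * qgroup 0/257 bumps rfer and excl of 0/257, and of every qgroup above
 * it in the hierarchy, by 16K.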
1383 static int qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
1384 struct btrfs_qgroup_operation *oper)
1386 struct btrfs_qgroup *qgroup;
1388 struct btrfs_qgroup_list *glist;
1389 struct ulist_node *unode;
1390 struct ulist_iterator uiter;
1394 tmp = ulist_alloc(GFP_NOFS);
1398 spin_lock(&fs_info->qgroup_lock);
1399 if (!fs_info->quota_root)
1401 qgroup = find_qgroup_rb(fs_info, oper->ref_root);
1404 switch (oper->type) {
1405 case BTRFS_QGROUP_OPER_ADD_EXCL:
1408 case BTRFS_QGROUP_OPER_SUB_EXCL:
1414 qgroup->rfer += sign * oper->num_bytes;
1415 qgroup->rfer_cmpr += sign * oper->num_bytes;
1417 WARN_ON(sign < 0 && qgroup->excl < oper->num_bytes);
1418 qgroup->excl += sign * oper->num_bytes;
1419 qgroup->excl_cmpr += sign * oper->num_bytes;
1421 qgroup_dirty(fs_info, qgroup);
1423 /* Get all of the parent groups that contain this qgroup */
1424 list_for_each_entry(glist, &qgroup->groups, next_group) {
1425 ret = ulist_add(tmp, glist->group->qgroupid,
1426 ptr_to_u64(glist->group), GFP_ATOMIC);
1431 /* Iterate all of the parents and adjust their reference counts */
1432 ULIST_ITER_INIT(&uiter);
1433 while ((unode = ulist_next(tmp, &uiter))) {
1434 qgroup = u64_to_ptr(unode->aux);
1435 qgroup->rfer += sign * oper->num_bytes;
1436 qgroup->rfer_cmpr += sign * oper->num_bytes;
1437 WARN_ON(sign < 0 && qgroup->excl < oper->num_bytes);
1438 qgroup->excl += sign * oper->num_bytes;
1439 qgroup->excl_cmpr += sign * oper->num_bytes;
1440 qgroup_dirty(fs_info, qgroup);
1442 /* Add any parents of the parents */
1443 list_for_each_entry(glist, &qgroup->groups, next_group) {
1444 ret = ulist_add(tmp, glist->group->qgroupid,
1445 ptr_to_u64(glist->group), GFP_ATOMIC);
1452 spin_unlock(&fs_info->qgroup_lock);
1458 * Walk all of the roots that pointed to our bytenr and adjust their refcnts as
1461 static int qgroup_calc_old_refcnt(struct btrfs_fs_info *fs_info,
1462 u64 root_to_skip, struct ulist *tmp,
1463 struct ulist *roots, struct ulist *qgroups,
1464 u64 seq, int *old_roots, int rescan)
1466 struct ulist_node *unode;
1467 struct ulist_iterator uiter;
1468 struct ulist_node *tmp_unode;
1469 struct ulist_iterator tmp_uiter;
1470 struct btrfs_qgroup *qg;
1473 ULIST_ITER_INIT(&uiter);
1474 while ((unode = ulist_next(roots, &uiter))) {
1475 /* We don't count our current root here */
1476 if (unode->val == root_to_skip)
1478 qg = find_qgroup_rb(fs_info, unode->val);
1482 * We could have a pending removal of this same ref so we may
1483 * not have actually found our ref root when doing
1484 * btrfs_find_all_roots, so we need to keep track of how many
1485 * old roots we find in case we removed ours and added a
1486 * different one at the same time. I don't think this could
1487 * happen in practice but that sort of thinking leads to pain
1488 * and suffering and to the dark side.
1493 ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
1497 ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg), GFP_ATOMIC);
1500 ULIST_ITER_INIT(&tmp_uiter);
1501 while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
1502 struct btrfs_qgroup_list *glist;
1504 qg = u64_to_ptr(tmp_unode->aux);
1506 * We use this sequence number to keep from having to
1507 * run the whole list and 0 out the refcnt every time.
1508 * We basically use the sequence as the known 0 count and
1509 * then add 1 every time we see a qgroup. This is how we
1510 * get how many of the roots actually point up to the
1511 * upper level qgroups in order to determine exclusive
1514 * For rescan we want to set old_refcnt to seq so our
1515 * exclusive calculations end up correct.
1518 qg->old_refcnt = seq;
1519 else if (qg->old_refcnt < seq)
1520 qg->old_refcnt = seq + 1;
1524 if (qg->new_refcnt < seq)
1525 qg->new_refcnt = seq + 1;
1528 list_for_each_entry(glist, &qg->groups, next_group) {
1529 ret = ulist_add(qgroups, glist->group->qgroupid,
1530 ptr_to_u64(glist->group),
1534 ret = ulist_add(tmp, glist->group->qgroupid,
1535 ptr_to_u64(glist->group),
1546 * We need to walk forward in our operation tree and account for any roots that
1547 * were deleted after we made this operation.
1549 static int qgroup_account_deleted_refs(struct btrfs_fs_info *fs_info,
1550 struct btrfs_qgroup_operation *oper,
1552 struct ulist *qgroups, u64 seq,
1555 struct ulist_node *unode;
1556 struct ulist_iterator uiter;
1557 struct btrfs_qgroup *qg;
1558 struct btrfs_qgroup_operation *tmp_oper;
1565 * We only walk forward in the tree since we're only interested in
1566 * removals that happened _after_ our operation.
1568 spin_lock(&fs_info->qgroup_op_lock);
1569 n = rb_next(&oper->n);
1570 spin_unlock(&fs_info->qgroup_op_lock);
1573 tmp_oper = rb_entry(n, struct btrfs_qgroup_operation, n);
1574 while (tmp_oper->bytenr == oper->bytenr) {
1576 * If it's not a removal we don't care, additions work out
1577 * properly with our refcnt tracking.
1579 if (tmp_oper->type != BTRFS_QGROUP_OPER_SUB_SHARED &&
1580 tmp_oper->type != BTRFS_QGROUP_OPER_SUB_EXCL)
1582 qg = find_qgroup_rb(fs_info, tmp_oper->ref_root);
1585 ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
1591 * We only want to increase old_roots if this qgroup is
1592 * not already in the list of qgroups. If it is already
1593 * there then that means it must have been re-added or
1594 * the delete will be discarded because we had an
1595 * existing ref that we haven't looked up yet. In this
1596 * case we don't want to increase old_roots. So if ret
1597 * == 1 then we know that this is the first time we've
1598 * seen this qgroup and we can bump the old_roots.
1601 ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg),
1607 spin_lock(&fs_info->qgroup_op_lock);
1608 n = rb_next(&tmp_oper->n);
1609 spin_unlock(&fs_info->qgroup_op_lock);
1612 tmp_oper = rb_entry(n, struct btrfs_qgroup_operation, n);
1615 /* Ok now process the qgroups we found */
1616 ULIST_ITER_INIT(&uiter);
1617 while ((unode = ulist_next(tmp, &uiter))) {
1618 struct btrfs_qgroup_list *glist;
1620 qg = u64_to_ptr(unode->aux);
1621 if (qg->old_refcnt < seq)
1622 qg->old_refcnt = seq + 1;
1625 if (qg->new_refcnt < seq)
1626 qg->new_refcnt = seq + 1;
1629 list_for_each_entry(glist, &qg->groups, next_group) {
1630 ret = ulist_add(qgroups, glist->group->qgroupid,
1631 ptr_to_u64(glist->group), GFP_ATOMIC);
1634 ret = ulist_add(tmp, glist->group->qgroupid,
1635 ptr_to_u64(glist->group), GFP_ATOMIC);
1643 /* Add refcnt for the newly added reference. */
1644 static int qgroup_calc_new_refcnt(struct btrfs_fs_info *fs_info,
1645 struct btrfs_qgroup_operation *oper,
1646 struct btrfs_qgroup *qgroup,
1647 struct ulist *tmp, struct ulist *qgroups,
1650 struct ulist_node *unode;
1651 struct ulist_iterator uiter;
1652 struct btrfs_qgroup *qg;
1656 ret = ulist_add(qgroups, qgroup->qgroupid, ptr_to_u64(qgroup),
1660 ret = ulist_add(tmp, qgroup->qgroupid, ptr_to_u64(qgroup),
1664 ULIST_ITER_INIT(&uiter);
1665 while ((unode = ulist_next(tmp, &uiter))) {
1666 struct btrfs_qgroup_list *glist;
1668 qg = u64_to_ptr(unode->aux);
1669 if (oper->type == BTRFS_QGROUP_OPER_ADD_SHARED) {
1670 if (qg->new_refcnt < seq)
1671 qg->new_refcnt = seq + 1;
1675 if (qg->old_refcnt < seq)
1676 qg->old_refcnt = seq + 1;
1680 list_for_each_entry(glist, &qg->groups, next_group) {
1681 ret = ulist_add(tmp, glist->group->qgroupid,
1682 ptr_to_u64(glist->group), GFP_ATOMIC);
1685 ret = ulist_add(qgroups, glist->group->qgroupid,
1686 ptr_to_u64(glist->group), GFP_ATOMIC);
1695 * This adjusts the counters for all referenced qgroups if need be.
1697 static int qgroup_adjust_counters(struct btrfs_fs_info *fs_info,
1698 u64 root_to_skip, u64 num_bytes,
1699 struct ulist *qgroups, u64 seq,
1700 int old_roots, int new_roots, int rescan)
1702 struct ulist_node *unode;
1703 struct ulist_iterator uiter;
1704 struct btrfs_qgroup *qg;
1705 u64 cur_new_count, cur_old_count;
1707 ULIST_ITER_INIT(&uiter);
1708 while ((unode = ulist_next(qgroups, &uiter))) {
1711 qg = u64_to_ptr(unode->aux);
1713 * Wasn't referenced before but is now, add to the reference
1716 if (qg->old_refcnt <= seq && qg->new_refcnt > seq) {
1717 qg->rfer += num_bytes;
1718 qg->rfer_cmpr += num_bytes;
1723 * Was referenced before but isn't now, subtract from the
1724 * reference counters.
1726 if (qg->old_refcnt > seq && qg->new_refcnt <= seq) {
1727 qg->rfer -= num_bytes;
1728 qg->rfer_cmpr -= num_bytes;
1732 if (qg->old_refcnt < seq)
1735 cur_old_count = qg->old_refcnt - seq;
1736 if (qg->new_refcnt < seq)
1739 cur_new_count = qg->new_refcnt - seq;
1742 * If our refcount was the same as the roots previously but our
1743 * new count isn't the same as the number of roots now then we
1744 * went from having an exclusive reference on this range to not.
1746 if (old_roots && cur_old_count == old_roots &&
1747 (cur_new_count != new_roots || new_roots == 0)) {
1748 WARN_ON(cur_new_count != new_roots && new_roots == 0);
1749 qg->excl -= num_bytes;
1750 qg->excl_cmpr -= num_bytes;
1755 * If we didn't reference all the roots before but now we do, we
1756 * have an exclusive reference to this range.
1758 if ((!old_roots || (old_roots && cur_old_count != old_roots))
1759 && cur_new_count == new_roots) {
1760 qg->excl += num_bytes;
1761 qg->excl_cmpr += num_bytes;
1766 qgroup_dirty(fs_info, qg);
1772 * If we removed a data extent and there were other references for that bytenr
1773 * then we need to look up all referenced roots to make sure we still don't
1774 * reference this bytenr. If we do then we can just discard this operation.
1776 static int check_existing_refs(struct btrfs_trans_handle *trans,
1777 struct btrfs_fs_info *fs_info,
1778 struct btrfs_qgroup_operation *oper)
1780 struct ulist *roots = NULL;
1781 struct ulist_node *unode;
1782 struct ulist_iterator uiter;
1785 ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr,
1786 oper->elem.seq, &roots);
1791 ULIST_ITER_INIT(&uiter);
1792 while ((unode = ulist_next(roots, &uiter))) {
1793 if (unode->val == oper->ref_root) {
1799 btrfs_put_tree_mod_seq(fs_info, &oper->elem);
1805 * If we share a reference across multiple roots then we may need to adjust
1806 * various qgroups referenced and exclusive counters. The basic premise is this
1808 * 1) We have seq to represent a 0 count. Instead of looping through all of the
1809 * qgroups and resetting their refcount to 0 we just constantly bump this
1810 * sequence number to act as the base reference count. This means that if
1811 * anybody is equal to or below this sequence they were never referenced. We
1812 * jack this sequence up by the number of roots we found each time in order to
1813 * make sure we don't have any overlap.
1815 * 2) We first search all the roots that reference the area _except_ the root
1816 * we're acting on currently. This makes up the old_refcnt of all the qgroups
1819 * 3) We walk all of the qgroups referenced by the root we are currently acting
1820 * on, and will either adjust old_refcnt in the case of a removal or the
1821 * new_refcnt in the case of an addition.
1823 * 4) Finally we walk all the qgroups that are referenced by this range
1824 * including the root we are acting on currently. We will adjust the counters
1825 * based on the number of roots we had and will have after this operation.
1827 * Take this example as an illustration
1831 * [qg 0/0] [qg 0/1] [qg 0/2]
1835 * Say we are adding a reference that is covered by qg 0/0. The first step
1836 * would give a refcnt of 1 to qg 0/1 and 0/2 and a refcnt of 2 to qg 1/0 with
1837 * old_roots being 2. Because it is adding new_roots will be 1. We then go
1838 * through qg 0/0 which will get the new_refcnt set to 1 and add 1 to qg 1/0's
1839 * new_refcnt, bringing it to 3. We then walk through all of the qgroups, we
1840 * notice that the old refcnt for qg 0/0 < the new refcnt, so we added a
1841 * reference and thus must add the size to the referenced bytes. Everything
1842 * else is the same so nothing else changes.
1844 static int qgroup_shared_accounting(struct btrfs_trans_handle *trans,
1845 struct btrfs_fs_info *fs_info,
1846 struct btrfs_qgroup_operation *oper)
1848 struct ulist *roots = NULL;
1849 struct ulist *qgroups, *tmp;
1850 struct btrfs_qgroup *qgroup;
1851 struct seq_list elem = SEQ_LIST_INIT(elem);
1857 if (oper->elem.seq) {
1858 ret = check_existing_refs(trans, fs_info, oper);
1865 qgroups = ulist_alloc(GFP_NOFS);
1869 tmp = ulist_alloc(GFP_NOFS);
1871 ulist_free(qgroups);
1875 btrfs_get_tree_mod_seq(fs_info, &elem);
1876 ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr, elem.seq,
1878 btrfs_put_tree_mod_seq(fs_info, &elem);
1880 ulist_free(qgroups);
1884 spin_lock(&fs_info->qgroup_lock);
1885 qgroup = find_qgroup_rb(fs_info, oper->ref_root);
1888 seq = fs_info->qgroup_seq;
1891 * So roots is the list of all the roots currently pointing at the
1892 * bytenr, including the ref we are adding if we are adding, or not if
1893 * we are removing a ref. So we pass in the ref_root to skip that root
1894 * in our calculations. We set old_refcnt and new_refcnt because who the
1895 * hell knows what everything looked like before, and it doesn't matter
1898 ret = qgroup_calc_old_refcnt(fs_info, oper->ref_root, tmp, roots, qgroups,
1899 seq, &old_roots, 0);
1904 * Now adjust the refcounts of the qgroups that care about this
1905 * reference, either the old_count in the case of removal or new_count
1906 * in the case of an addition.
1908 ret = qgroup_calc_new_refcnt(fs_info, oper, qgroup, tmp, qgroups,
1914 * ...in the case of removals. If we had a removal before we got around
1915 * to processing this operation then we need to find that guy and count
1916 * his references as if they really existed so we don't end up screwing
1917 * up the exclusive counts. Then whenever we go to process the delete
1918 * everything will be grand and we can account for whatever exclusive
1919 * changes need to be made there. We also have to pass in old_roots so
1920 * we have an accurate count of the roots as it pertains to this
1921 * operation's view of the world.
1923 ret = qgroup_account_deleted_refs(fs_info, oper, tmp, qgroups, seq,
1929 * We are adding our root, need to adjust up the number of roots,
1930 * otherwise old_roots is the number of roots we want.
1932 if (oper->type == BTRFS_QGROUP_OPER_ADD_SHARED) {
1933 new_roots = old_roots + 1;
1935 new_roots = old_roots;
1938 fs_info->qgroup_seq += old_roots + 1;
1942 * And now the magic happens, bless Arne for having a pretty elegant
1943 * solution for this.
1945 qgroup_adjust_counters(fs_info, oper->ref_root, oper->num_bytes,
1946 qgroups, seq, old_roots, new_roots, 0);
1948 spin_unlock(&fs_info->qgroup_lock);
1949 ulist_free(qgroups);
1956 * Process a reference to a shared subtree. This type of operation is
1957 * queued during snapshot removal when we encounter extents which are
1958 * shared between more than one root.
1960 static int qgroup_subtree_accounting(struct btrfs_trans_handle *trans,
1961 struct btrfs_fs_info *fs_info,
1962 struct btrfs_qgroup_operation *oper)
1964 struct ulist *roots = NULL;
1965 struct ulist_node *unode;
1966 struct ulist_iterator uiter;
1967 struct btrfs_qgroup_list *glist;
1968 struct ulist *parents;
1971 struct btrfs_qgroup *qg;
1973 struct seq_list elem = SEQ_LIST_INIT(elem);
1975 parents = ulist_alloc(GFP_NOFS);
1979 btrfs_get_tree_mod_seq(fs_info, &elem);
1980 ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr,
1982 btrfs_put_tree_mod_seq(fs_info, &elem);
1986 if (roots->nnodes != 1)
1989 ULIST_ITER_INIT(&uiter);
1990 unode = ulist_next(roots, &uiter); /* Only want 1 so no need to loop */
1992 * If we find our ref root then that means all refs
1993 * this extent has to the root have not yet been
1994 * deleted. In that case, we do nothing and let the
1995 * last ref for this bytenr drive our update.
1997 * This can happen for example if an extent is
1998 * referenced multiple times in a snapshot (clone,
1999 * etc). If we are in the middle of snapshot removal,
2000 * queued updates for such an extent will find the
2001 * root if we have not yet finished removing the
2004 if (unode->val == oper->ref_root)
2007 root_obj = unode->val;
2010 spin_lock(&fs_info->qgroup_lock);
2011 qg = find_qgroup_rb(fs_info, root_obj);
2015 qg->excl += oper->num_bytes;
2016 qg->excl_cmpr += oper->num_bytes;
2017 qgroup_dirty(fs_info, qg);
2020 * Adjust counts for parent groups. First we find all
2021 * parents, then in the 2nd loop we do the adjustment
2022 * while adding parents of the parents to our ulist.
2024 list_for_each_entry(glist, &qg->groups, next_group) {
2025 err = ulist_add(parents, glist->group->qgroupid,
2026 ptr_to_u64(glist->group), GFP_ATOMIC);
2033 ULIST_ITER_INIT(&uiter);
2034 while ((unode = ulist_next(parents, &uiter))) {
2035 qg = u64_to_ptr(unode->aux);
2036 qg->excl += oper->num_bytes;
2037 qg->excl_cmpr += oper->num_bytes;
2038 qgroup_dirty(fs_info, qg);
2040 /* Add any parents of the parents */
2041 list_for_each_entry(glist, &qg->groups, next_group) {
2042 err = ulist_add(parents, glist->group->qgroupid,
2043 ptr_to_u64(glist->group), GFP_ATOMIC);
2052 spin_unlock(&fs_info->qgroup_lock);
2056 ulist_free(parents);
2061 * btrfs_qgroup_account is called for every ref that is added to or deleted
2062 * from the fs. First, all roots referencing the extent are searched, and
2063 * then the space is accounted accordingly to the different roots. The
2064 * accounting algorithm works in 3 steps documented inline.
2066 static int btrfs_qgroup_account(struct btrfs_trans_handle *trans,
2067 struct btrfs_fs_info *fs_info,
2068 struct btrfs_qgroup_operation *oper)
2072 if (!fs_info->quota_enabled)
2075 BUG_ON(!fs_info->quota_root);
2077 mutex_lock(&fs_info->qgroup_rescan_lock);
2078 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
2079 if (fs_info->qgroup_rescan_progress.objectid <= oper->bytenr) {
2080 mutex_unlock(&fs_info->qgroup_rescan_lock);
2084 mutex_unlock(&fs_info->qgroup_rescan_lock);
2086 ASSERT(is_fstree(oper->ref_root));
2088 trace_btrfs_qgroup_account(oper);
2090 switch (oper->type) {
2091 case BTRFS_QGROUP_OPER_ADD_EXCL:
2092 case BTRFS_QGROUP_OPER_SUB_EXCL:
2093 ret = qgroup_excl_accounting(fs_info, oper);
2095 case BTRFS_QGROUP_OPER_ADD_SHARED:
2096 case BTRFS_QGROUP_OPER_SUB_SHARED:
2097 ret = qgroup_shared_accounting(trans, fs_info, oper);
2099 case BTRFS_QGROUP_OPER_SUB_SUBTREE:
2100 ret = qgroup_subtree_accounting(trans, fs_info, oper);
2109 * Needs to be called every time we run delayed refs, even if there is an
2110 * error, in order to clean up outstanding operations.
2112 int btrfs_delayed_qgroup_accounting(struct btrfs_trans_handle *trans,
2113 struct btrfs_fs_info *fs_info)
2115 struct btrfs_qgroup_operation *oper;
2118 while (!list_empty(&trans->qgroup_ref_list)) {
2119 oper = list_first_entry(&trans->qgroup_ref_list,
2120 struct btrfs_qgroup_operation, list);
2121 list_del_init(&oper->list);
2122 if (!ret || !trans->aborted)
2123 ret = btrfs_qgroup_account(trans, fs_info, oper);
2124 spin_lock(&fs_info->qgroup_op_lock);
2125 rb_erase(&oper->n, &fs_info->qgroup_op_tree);
2126 spin_unlock(&fs_info->qgroup_op_lock);
2127 btrfs_put_tree_mod_seq(fs_info, &oper->elem);
2134 * called from commit_transaction. Writes all changed qgroups to disk.
2136 int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
2137 struct btrfs_fs_info *fs_info)
2139 struct btrfs_root *quota_root = fs_info->quota_root;
2141 int start_rescan_worker = 0;
2146 if (!fs_info->quota_enabled && fs_info->pending_quota_state)
2147 start_rescan_worker = 1;
2149 fs_info->quota_enabled = fs_info->pending_quota_state;
2151 spin_lock(&fs_info->qgroup_lock);
2152 while (!list_empty(&fs_info->dirty_qgroups)) {
2153 struct btrfs_qgroup *qgroup;
2154 qgroup = list_first_entry(&fs_info->dirty_qgroups,
2155 struct btrfs_qgroup, dirty);
2156 list_del_init(&qgroup->dirty);
2157 spin_unlock(&fs_info->qgroup_lock);
2158 ret = update_qgroup_info_item(trans, quota_root, qgroup);
2160 fs_info->qgroup_flags |=
2161 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2162 ret = update_qgroup_limit_item(trans, quota_root, qgroup);
2164 fs_info->qgroup_flags |=
2165 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2166 spin_lock(&fs_info->qgroup_lock);
2168 if (fs_info->quota_enabled)
2169 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
2171 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
2172 spin_unlock(&fs_info->qgroup_lock);
2174 ret = update_qgroup_status_item(trans, fs_info, quota_root);
2176 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
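/*
 * If quota was just enabled, kick off a rescan so the counters become
 * consistent with what is already on disk.
 */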
2178 if (!ret && start_rescan_worker) {
2179 ret = qgroup_rescan_init(fs_info, 0, 1);
2181 qgroup_rescan_zero_tracking(fs_info);
2182 btrfs_queue_work(fs_info->qgroup_rescan_workers,
2183 &fs_info->qgroup_rescan_work);
2194 * copy the accounting information between qgroups. This is necessary when a
2195 * snapshot or a subvolume is created
2197 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
2198 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
2199 struct btrfs_qgroup_inherit *inherit)
2204 struct btrfs_root *quota_root = fs_info->quota_root;
2205 struct btrfs_qgroup *srcgroup;
2206 struct btrfs_qgroup *dstgroup;
2210 mutex_lock(&fs_info->qgroup_ioctl_lock);
2211 if (!fs_info->quota_enabled)
2220 i_qgroups = (u64 *)(inherit + 1);
2221 nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
2222 2 * inherit->num_excl_copies;
2223 for (i = 0; i < nums; ++i) {
2224 srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
2234 * create a tracking group for the subvol itself
2236 ret = add_qgroup_item(trans, quota_root, objectid);
2241 struct btrfs_root *srcroot;
2242 struct btrfs_key srckey;
2244 srckey.objectid = srcid;
2245 srckey.type = BTRFS_ROOT_ITEM_KEY;
2246 srckey.offset = (u64)-1;
2247 srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
2248 if (IS_ERR(srcroot)) {
2249 ret = PTR_ERR(srcroot);
2254 level_size = srcroot->nodesize;
2259 * add qgroup to all inherited groups
2262 i_qgroups = (u64 *)(inherit + 1);
2263 for (i = 0; i < inherit->num_qgroups; ++i) {
2264 ret = add_qgroup_relation_item(trans, quota_root,
2265 objectid, *i_qgroups);
2268 ret = add_qgroup_relation_item(trans, quota_root,
2269 *i_qgroups, objectid);
2277 spin_lock(&fs_info->qgroup_lock);
2279 dstgroup = add_qgroup_rb(fs_info, objectid);
2280 if (IS_ERR(dstgroup)) {
2281 ret = PTR_ERR(dstgroup);
2285 if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
2286 dstgroup->lim_flags = inherit->lim.flags;
2287 dstgroup->max_rfer = inherit->lim.max_rfer;
2288 dstgroup->max_excl = inherit->lim.max_excl;
2289 dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
2290 dstgroup->rsv_excl = inherit->lim.rsv_excl;
2292 ret = update_qgroup_limit_item(trans, quota_root, dstgroup);
2294 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2295 btrfs_info(fs_info, "unable to update quota limit for %llu",
2296 dstgroup->qgroupid);
2302 srcgroup = find_qgroup_rb(fs_info, srcid);
2307 * We call inherit after we clone the root in order to make sure
2308 * our counts don't go crazy, so at this point the only
2309 * difference between the two roots should be the root node.
2311 dstgroup->rfer = srcgroup->rfer;
2312 dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
2313 dstgroup->excl = level_size;
2314 dstgroup->excl_cmpr = level_size;
2315 srcgroup->excl = level_size;
2316 srcgroup->excl_cmpr = level_size;
2318 /* inherit the limit info */
2319 dstgroup->lim_flags = srcgroup->lim_flags;
2320 dstgroup->max_rfer = srcgroup->max_rfer;
2321 dstgroup->max_excl = srcgroup->max_excl;
2322 dstgroup->rsv_rfer = srcgroup->rsv_rfer;
2323 dstgroup->rsv_excl = srcgroup->rsv_excl;
2325 qgroup_dirty(fs_info, dstgroup);
2326 qgroup_dirty(fs_info, srcgroup);
2332 i_qgroups = (u64 *)(inherit + 1);
2333 for (i = 0; i < inherit->num_qgroups; ++i) {
2334 ret = add_relation_rb(quota_root->fs_info, objectid,
2341 for (i = 0; i < inherit->num_ref_copies; ++i) {
2342 struct btrfs_qgroup *src;
2343 struct btrfs_qgroup *dst;
2345 src = find_qgroup_rb(fs_info, i_qgroups[0]);
2346 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2353 dst->rfer = src->rfer - level_size;
2354 dst->rfer_cmpr = src->rfer_cmpr - level_size;
2357 for (i = 0; i < inherit->num_excl_copies; ++i) {
2358 struct btrfs_qgroup *src;
2359 struct btrfs_qgroup *dst;
2361 src = find_qgroup_rb(fs_info, i_qgroups[0]);
2362 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2369 dst->excl = src->excl + level_size;
2370 dst->excl_cmpr = src->excl_cmpr + level_size;
2375 spin_unlock(&fs_info->qgroup_lock);
2377 mutex_unlock(&fs_info->qgroup_ioctl_lock);
2382 * reserve some space for a qgroup and all its parents. The reservation takes
2383 * place with start_transaction or dealloc_reserve, similar to ENOSPC
2384 * accounting. If not enough space is available, EDQUOT is returned.
2385 * We assume that the requested space is new for all qgroups.
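 *
 * A caller sketch (assumed shape, not a specific call site):
 *
 *	ret = btrfs_qgroup_reserve(root, num_bytes);
 *	if (ret)
 *		return ret;	(ret is -EDQUOT if a limit would be hit)
 *	...
 *	btrfs_qgroup_free(root, num_bytes);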
2387 int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
2389 struct btrfs_root *quota_root;
2390 struct btrfs_qgroup *qgroup;
2391 struct btrfs_fs_info *fs_info = root->fs_info;
2392 u64 ref_root = root->root_key.objectid;
2394 struct ulist_node *unode;
2395 struct ulist_iterator uiter;
2397 if (!is_fstree(ref_root))
2403 spin_lock(&fs_info->qgroup_lock);
2404 quota_root = fs_info->quota_root;
2408 qgroup = find_qgroup_rb(fs_info, ref_root);
2413 * in a first step, we check all affected qgroups to see if any limits would
2416 ulist_reinit(fs_info->qgroup_ulist);
2417 ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
2418 (uintptr_t)qgroup, GFP_ATOMIC);
2421 ULIST_ITER_INIT(&uiter);
2422 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2423 struct btrfs_qgroup *qg;
2424 struct btrfs_qgroup_list *glist;
2426 qg = u64_to_ptr(unode->aux);
2428 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
2429 qg->reserved + (s64)qg->rfer + num_bytes >
2435 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
2436 qg->reserved + (s64)qg->excl + num_bytes >
2442 list_for_each_entry(glist, &qg->groups, next_group) {
2443 ret = ulist_add(fs_info->qgroup_ulist,
2444 glist->group->qgroupid,
2445 (uintptr_t)glist->group, GFP_ATOMIC);
2452 * no limits exceeded, now record the reservation into all qgroups
2454 ULIST_ITER_INIT(&uiter);
2455 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2456 struct btrfs_qgroup *qg;
2458 qg = u64_to_ptr(unode->aux);
2460 qg->reserved += num_bytes;
2464 spin_unlock(&fs_info->qgroup_lock);
2468 void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
2470 struct btrfs_root *quota_root;
2471 struct btrfs_qgroup *qgroup;
2472 struct btrfs_fs_info *fs_info = root->fs_info;
2473 struct ulist_node *unode;
2474 struct ulist_iterator uiter;
2475 u64 ref_root = root->root_key.objectid;
2478 if (!is_fstree(ref_root))
2484 spin_lock(&fs_info->qgroup_lock);
2486 quota_root = fs_info->quota_root;
2490 qgroup = find_qgroup_rb(fs_info, ref_root);
2494 ulist_reinit(fs_info->qgroup_ulist);
2495 ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
2496 (uintptr_t)qgroup, GFP_ATOMIC);
2499 ULIST_ITER_INIT(&uiter);
2500 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2501 struct btrfs_qgroup *qg;
2502 struct btrfs_qgroup_list *glist;
2504 qg = u64_to_ptr(unode->aux);
2506 qg->reserved -= num_bytes;
2508 list_for_each_entry(glist, &qg->groups, next_group) {
2509 ret = ulist_add(fs_info->qgroup_ulist,
2510 glist->group->qgroupid,
2511 (uintptr_t)glist->group, GFP_ATOMIC);
2518 spin_unlock(&fs_info->qgroup_lock);
2521 void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
2523 if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
2525 btrfs_err(trans->root->fs_info,
2526 "qgroups not uptodate in trans handle %p: list is%s empty, "
2528 trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
2529 (u32)(trans->delayed_ref_elem.seq >> 32),
2530 (u32)trans->delayed_ref_elem.seq);
2535 * returns < 0 on error, 0 when more leaves are to be scanned.
2536 * returns 1 when done, 2 when done and FLAG_INCONSISTENT was cleared.
2539 qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
2540 struct btrfs_trans_handle *trans, struct ulist *qgroups,
2541 struct ulist *tmp, struct extent_buffer *scratch_leaf)
2543 struct btrfs_key found;
2544 struct ulist *roots = NULL;
2545 struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
2552 path->leave_spinning = 1;
2553 mutex_lock(&fs_info->qgroup_rescan_lock);
2554 ret = btrfs_search_slot_for_read(fs_info->extent_root,
2555 &fs_info->qgroup_rescan_progress,
2558 pr_debug("current progress key (%llu %u %llu), search_slot ret %d\n",
2559 fs_info->qgroup_rescan_progress.objectid,
2560 fs_info->qgroup_rescan_progress.type,
2561 fs_info->qgroup_rescan_progress.offset, ret);
2565 * The rescan is about to end, we will not be scanning any
2566 * further blocks. We cannot unset the RESCAN flag here, because
2567 * we want to commit the transaction if everything went well.
2568 * To make the live accounting work in this phase, we set our
2569 * scan progress pointer such that every real extent objectid
2572 fs_info->qgroup_rescan_progress.objectid = (u64)-1;
2573 btrfs_release_path(path);
2574 mutex_unlock(&fs_info->qgroup_rescan_lock);
2578 btrfs_item_key_to_cpu(path->nodes[0], &found,
2579 btrfs_header_nritems(path->nodes[0]) - 1);
2580 fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
2582 btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
2583 memcpy(scratch_leaf, path->nodes[0], sizeof(*scratch_leaf));
2584 slot = path->slots[0];
2585 btrfs_release_path(path);
2586 mutex_unlock(&fs_info->qgroup_rescan_lock);
2588 for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
2589 btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
2590 if (found.type != BTRFS_EXTENT_ITEM_KEY &&
2591 found.type != BTRFS_METADATA_ITEM_KEY)
2593 if (found.type == BTRFS_METADATA_ITEM_KEY)
2594 num_bytes = fs_info->extent_root->nodesize;
2596 num_bytes = found.offset;
2598 ulist_reinit(qgroups);
2599 ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
2603 spin_lock(&fs_info->qgroup_lock);
2604 seq = fs_info->qgroup_seq;
2605 fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */
2608 ret = qgroup_calc_old_refcnt(fs_info, 0, tmp, roots, qgroups,
2609 seq, &new_roots, 1);
2611 spin_unlock(&fs_info->qgroup_lock);
2616 ret = qgroup_adjust_counters(fs_info, 0, num_bytes, qgroups,
2617 seq, 0, new_roots, 1);
2619 spin_unlock(&fs_info->qgroup_lock);
2623 spin_unlock(&fs_info->qgroup_lock);
2627 btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
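/*
 * Editorial sketch of the calling convention (the real driver is
 * btrfs_qgroup_rescan_worker() below): one leaf is processed per
 * transaction and the caller loops for as long as 0 comes back.
 */
#if 0
	err = 0;
	while (!err) {
		/* open a transaction, then: */
		err = qgroup_rescan_leaf(fs_info, path, trans,
					 qgroups, tmp, scratch_leaf);
		/* err > 0: done (commit); err < 0: error (abort loop) */
	}
#endif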
static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
						     qgroup_rescan_work);
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *tmp = NULL, *qgroups = NULL;
	struct extent_buffer *scratch_leaf = NULL;
	int err = -ENOMEM;

	path = btrfs_alloc_path();
	if (!path)
		goto out;
	qgroups = ulist_alloc(GFP_NOFS);
	if (!qgroups)
		goto out;
	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		goto out;
	scratch_leaf = kmalloc(sizeof(*scratch_leaf), GFP_NOFS);
	if (!scratch_leaf)
		goto out;

	err = 0;
	while (!err) {
		trans = btrfs_start_transaction(fs_info->fs_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			break;
		}
		if (!fs_info->quota_enabled) {
			err = -EINTR;
		} else {
			err = qgroup_rescan_leaf(fs_info, path, trans,
						 qgroups, tmp, scratch_leaf);
		}
		if (err > 0)
			btrfs_commit_transaction(trans, fs_info->fs_root);
		else
			btrfs_end_transaction(trans, fs_info->fs_root);
	}

out:
	kfree(scratch_leaf);
	ulist_free(qgroups);
	ulist_free(tmp);
	btrfs_free_path(path);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	if (err == 2 &&
	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	} else if (err < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (err >= 0) {
		btrfs_info(fs_info, "qgroup scan completed%s",
			err == 2 ? " (inconsistency flag cleared)" : "");
	} else {
		btrfs_err(fs_info, "qgroup scan failed with %d", err);
	}
	complete_all(&fs_info->qgroup_rescan_completion);
}
/*
 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
 * memory required for the rescan context.
 */
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags)
{
	int ret = 0;

	if (!init_flags &&
	    (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) ||
	     !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))) {
		ret = -EINVAL;
		goto err;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);
	if (init_flags) {
		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
			ret = -EINPROGRESS;
		else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
			ret = -EINVAL;
		if (ret) {
			spin_unlock(&fs_info->qgroup_lock);
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			goto err;
		}
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	memset(&fs_info->qgroup_rescan_progress, 0,
		sizeof(fs_info->qgroup_rescan_progress));
	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	init_completion(&fs_info->qgroup_rescan_completion);

	memset(&fs_info->qgroup_rescan_work, 0,
	       sizeof(fs_info->qgroup_rescan_work));
	btrfs_init_work(&fs_info->qgroup_rescan_work,
			btrfs_qgroup_rescan_helper,
			btrfs_qgroup_rescan_worker, NULL, NULL);
	return 0;

err:
	btrfs_info(fs_info, "qgroup_rescan_init failed with %d", ret);
	return ret;
}
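/*
 * Usage sketch (editorial, hedged): init_flags selects between starting a
 * fresh rescan, which sets the RESCAN status flag itself, and re-arming a
 * rescan whose RESCAN flag was already found set, e.g. in the on-disk
 * status item during mount:
 */
#if 0
	ret = qgroup_rescan_init(fs_info, 0, 1);	/* fresh start */
	ret = qgroup_rescan_init(fs_info, progress_objectid, 0); /* resume */
#endif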
static void
qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	spin_lock(&fs_info->qgroup_lock);
	/* clear all current qgroup tracking information */
	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		qgroup->rfer = 0;
		qgroup->rfer_cmpr = 0;
		qgroup->excl = 0;
		qgroup->excl_cmpr = 0;
	}
	spin_unlock(&fs_info->qgroup_lock);
}
int
btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
{
	int ret = 0;
	struct btrfs_trans_handle *trans;

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (ret)
		return ret;

	/*
	 * We have set the rescan_progress to 0, which means no more
	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
	 * However, btrfs_qgroup_account_ref may be right after its call
	 * to btrfs_find_all_roots, in which case it would still do the
	 * accounting.
	 * To solve this, we're committing the transaction, which will
	 * ensure we run all delayed refs and only after that, we are
	 * going to clear all tracking information for a clean start.
	 */
	trans = btrfs_join_transaction(fs_info->fs_root);
	if (IS_ERR(trans)) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return PTR_ERR(trans);
	}
	ret = btrfs_commit_transaction(trans, fs_info->fs_root);
	if (ret) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return ret;
	}

	qgroup_rescan_zero_tracking(fs_info);

	btrfs_queue_work(fs_info->qgroup_rescan_workers,
			 &fs_info->qgroup_rescan_work);
	return 0;
}
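/*
 * Caller sketch (illustrative; example_rescan_and_wait() is a hypothetical
 * helper, not part of btrfs): a synchronous rescan pairs the kick-off
 * above with btrfs_qgroup_wait_for_completion() below.
 */
#if 0
static int example_rescan_and_wait(struct btrfs_fs_info *fs_info)
{
	int ret;

	ret = btrfs_qgroup_rescan(fs_info);
	if (ret)
		return ret;	/* e.g. -EINPROGRESS if one is running */
	return btrfs_qgroup_wait_for_completion(fs_info);
}
#endif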
int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info)
{
	int running;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);
	running = fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (running)
		ret = wait_for_completion_interruptible(
			&fs_info->qgroup_rescan_completion);
	return ret;
}
/*
 * This is only called from open_ctree() where we're still single threaded,
 * so locking is omitted here.
 */
void
btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
{
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
}