1 /*
2  * Copyright (C) 2011 STRATO.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18
19 #include <linux/sched.h>
20 #include <linux/pagemap.h>
21 #include <linux/writeback.h>
22 #include <linux/blkdev.h>
23 #include <linux/rbtree.h>
24 #include <linux/slab.h>
25 #include <linux/workqueue.h>
26 #include <linux/btrfs.h>
27
28 #include "ctree.h"
29 #include "transaction.h"
30 #include "disk-io.h"
31 #include "locking.h"
32 #include "ulist.h"
33 #include "backref.h"
34 #include "extent_io.h"
35 #include "qgroup.h"
36
37
38 /* TODO XXX FIXME
39  *  - subvol delete -> delete when ref goes to 0? delete limits also?
40  *  - reorganize keys
41  *  - compressed
42  *  - sync
43  *  - copy also limits on subvol creation
44  *  - limit
45  *  - caches for ulists
46  *  - performance benchmarks
47  *  - check all ioctl parameters
48  */
49
50 /*
51  * one struct for each qgroup, organized in fs_info->qgroup_tree.
52  */
53 struct btrfs_qgroup {
54         u64 qgroupid;
55
56         /*
57          * state
58          */
59         u64 rfer;       /* referenced */
60         u64 rfer_cmpr;  /* referenced compressed */
61         u64 excl;       /* exclusive */
62         u64 excl_cmpr;  /* exclusive compressed */
63
64         /*
65          * limits
66          */
67         u64 lim_flags;  /* which limits are set */
68         u64 max_rfer;
69         u64 max_excl;
70         u64 rsv_rfer;
71         u64 rsv_excl;
72
73         /*
74          * reservation tracking
75          */
76         u64 reserved;
77
78         /*
79          * lists
80          */
81         struct list_head groups;  /* groups this group is member of */
82         struct list_head members; /* groups that are members of this group */
83         struct list_head dirty;   /* dirty groups */
84         struct rb_node node;      /* tree of qgroups */
85
86         /*
87          * temp variables for accounting operations
88          * Refer to qgroup_shared_accounting() for details.
89          */
90         u64 old_refcnt;
91         u64 new_refcnt;
92 };
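
/*
 * Example of the qgroupid encoding used throughout this file: the
 * upper 16 bits hold the level and the lower 48 bits the id (see
 * btrfs_qgroup_level() in qgroup.h).  A level-0 qgroup such as 0/257
 * tracks a single subvolume and has qgroupid (0ULL << 48) | 257; a
 * higher-level qgroup such as 1/100 has qgroupid (1ULL << 48) | 100
 * and may only have lower-level qgroups as members.
 */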
93
94 static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
95                                            int mod)
96 {
97         if (qg->old_refcnt < seq)
98                 qg->old_refcnt = seq;
99         qg->old_refcnt += mod;
100 }
101
102 static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
103                                            int mod)
104 {
105         if (qg->new_refcnt < seq)
106                 qg->new_refcnt = seq;
107         qg->new_refcnt += mod;
108 }
109
110 static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
111 {
112         if (qg->old_refcnt < seq)
113                 return 0;
114         return qg->old_refcnt - seq;
115 }
116
117 static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
118 {
119         if (qg->new_refcnt < seq)
120                 return 0;
121         return qg->new_refcnt - seq;
122 }
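
/*
 * Illustrative model of the four refcnt helpers above (standalone C
 * with invented names, not part of this file): tagging counters with a
 * per-round sequence number makes "reset all refcnts" an O(1)
 * operation, because any stored value below the current seq simply
 * reads as zero.
 *
 *	static void refcnt_update(u64 *refcnt, u64 seq, int mod)
 *	{
 *		if (*refcnt < seq)
 *			*refcnt = seq;
 *		*refcnt += mod;
 *	}
 *
 *	static u64 refcnt_get(u64 refcnt, u64 seq)
 *	{
 *		return refcnt < seq ? 0 : refcnt - seq;
 *	}
 *
 * With seq == 1000, two refcnt_update(&rc, 1000, 1) calls make
 * refcnt_get(rc, 1000) return 2; at the next round (say seq == 2000)
 * the same rc reads as 0 again without anyone having to clear it.
 */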
123
124 /*
125  * glue structure to represent the relations between qgroups.
126  */
127 struct btrfs_qgroup_list {
128         struct list_head next_group;
129         struct list_head next_member;
130         struct btrfs_qgroup *group;
131         struct btrfs_qgroup *member;
132 };
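
/*
 * Example: add_relation_rb() below allocates one btrfs_qgroup_list per
 * relation, with ->member pointing at e.g. 0/257 and ->group at 1/100;
 * ->next_group is chained onto the member's ->groups list and
 * ->next_member onto the parent's ->members list, so a single
 * allocation represents the edge from both directions and
 * __del_qgroup_rb() can unlink it from either side.
 */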
133
134 #define ptr_to_u64(x) ((u64)(uintptr_t)x)
135 #define u64_to_ptr(x) ((struct btrfs_qgroup *)(uintptr_t)x)
136
137 static int
138 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
139                    int init_flags);
140 static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
141
142 /* must be called with qgroup_ioctl_lock held */
143 static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
144                                            u64 qgroupid)
145 {
146         struct rb_node *n = fs_info->qgroup_tree.rb_node;
147         struct btrfs_qgroup *qgroup;
148
149         while (n) {
150                 qgroup = rb_entry(n, struct btrfs_qgroup, node);
151                 if (qgroup->qgroupid < qgroupid)
152                         n = n->rb_left;
153                 else if (qgroup->qgroupid > qgroupid)
154                         n = n->rb_right;
155                 else
156                         return qgroup;
157         }
158         return NULL;
159 }
160
161 /* must be called with qgroup_lock held */
162 static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
163                                           u64 qgroupid)
164 {
165         struct rb_node **p = &fs_info->qgroup_tree.rb_node;
166         struct rb_node *parent = NULL;
167         struct btrfs_qgroup *qgroup;
168
169         while (*p) {
170                 parent = *p;
171                 qgroup = rb_entry(parent, struct btrfs_qgroup, node);
172
173                 if (qgroup->qgroupid < qgroupid)
174                         p = &(*p)->rb_left;
175                 else if (qgroup->qgroupid > qgroupid)
176                         p = &(*p)->rb_right;
177                 else
178                         return qgroup;
179         }
180
181         qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
182         if (!qgroup)
183                 return ERR_PTR(-ENOMEM);
184
185         qgroup->qgroupid = qgroupid;
186         INIT_LIST_HEAD(&qgroup->groups);
187         INIT_LIST_HEAD(&qgroup->members);
188         INIT_LIST_HEAD(&qgroup->dirty);
189
190         rb_link_node(&qgroup->node, parent, p);
191         rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);
192
193         return qgroup;
194 }
195
196 static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
197 {
198         struct btrfs_qgroup_list *list;
199
200         list_del(&qgroup->dirty);
201         while (!list_empty(&qgroup->groups)) {
202                 list = list_first_entry(&qgroup->groups,
203                                         struct btrfs_qgroup_list, next_group);
204                 list_del(&list->next_group);
205                 list_del(&list->next_member);
206                 kfree(list);
207         }
208
209         while (!list_empty(&qgroup->members)) {
210                 list = list_first_entry(&qgroup->members,
211                                         struct btrfs_qgroup_list, next_member);
212                 list_del(&list->next_group);
213                 list_del(&list->next_member);
214                 kfree(list);
215         }
216         kfree(qgroup);
217 }
218
219 /* must be called with qgroup_lock held */
220 static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
221 {
222         struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);
223
224         if (!qgroup)
225                 return -ENOENT;
226
227         rb_erase(&qgroup->node, &fs_info->qgroup_tree);
228         __del_qgroup_rb(qgroup);
229         return 0;
230 }
231
232 /* must be called with qgroup_lock held */
233 static int add_relation_rb(struct btrfs_fs_info *fs_info,
234                            u64 memberid, u64 parentid)
235 {
236         struct btrfs_qgroup *member;
237         struct btrfs_qgroup *parent;
238         struct btrfs_qgroup_list *list;
239
240         member = find_qgroup_rb(fs_info, memberid);
241         parent = find_qgroup_rb(fs_info, parentid);
242         if (!member || !parent)
243                 return -ENOENT;
244
245         list = kzalloc(sizeof(*list), GFP_ATOMIC);
246         if (!list)
247                 return -ENOMEM;
248
249         list->group = parent;
250         list->member = member;
251         list_add_tail(&list->next_group, &member->groups);
252         list_add_tail(&list->next_member, &parent->members);
253
254         return 0;
255 }
256
257 /* must be called with qgroup_lock held */
258 static int del_relation_rb(struct btrfs_fs_info *fs_info,
259                            u64 memberid, u64 parentid)
260 {
261         struct btrfs_qgroup *member;
262         struct btrfs_qgroup *parent;
263         struct btrfs_qgroup_list *list;
264
265         member = find_qgroup_rb(fs_info, memberid);
266         parent = find_qgroup_rb(fs_info, parentid);
267         if (!member || !parent)
268                 return -ENOENT;
269
270         list_for_each_entry(list, &member->groups, next_group) {
271                 if (list->group == parent) {
272                         list_del(&list->next_group);
273                         list_del(&list->next_member);
274                         kfree(list);
275                         return 0;
276                 }
277         }
278         return -ENOENT;
279 }
280
281 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
282 int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
283                                u64 rfer, u64 excl)
284 {
285         struct btrfs_qgroup *qgroup;
286
287         qgroup = find_qgroup_rb(fs_info, qgroupid);
288         if (!qgroup)
289                 return -EINVAL;
290         if (qgroup->rfer != rfer || qgroup->excl != excl)
291                 return -EINVAL;
292         return 0;
293 }
294 #endif
295
296 /*
297  * The full config is read in one go; this is only called from open_ctree().
298  * It doesn't use any locking, as at this point we're still single-threaded.
299  */
300 int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
301 {
302         struct btrfs_key key;
303         struct btrfs_key found_key;
304         struct btrfs_root *quota_root = fs_info->quota_root;
305         struct btrfs_path *path = NULL;
306         struct extent_buffer *l;
307         int slot;
308         int ret = 0;
309         u64 flags = 0;
310         u64 rescan_progress = 0;
311
312         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
313                 return 0;
314
315         fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
316         if (!fs_info->qgroup_ulist) {
317                 ret = -ENOMEM;
318                 goto out;
319         }
320
321         path = btrfs_alloc_path();
322         if (!path) {
323                 ret = -ENOMEM;
324                 goto out;
325         }
326
327         /* default this to quota off, in case no status key is found */
328         fs_info->qgroup_flags = 0;
329
330         /*
331          * pass 1: read status, all qgroup infos and limits
332          */
333         key.objectid = 0;
334         key.type = 0;
335         key.offset = 0;
336         ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
337         if (ret)
338                 goto out;
339
340         while (1) {
341                 struct btrfs_qgroup *qgroup;
342
343                 slot = path->slots[0];
344                 l = path->nodes[0];
345                 btrfs_item_key_to_cpu(l, &found_key, slot);
346
347                 if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
348                         struct btrfs_qgroup_status_item *ptr;
349
350                         ptr = btrfs_item_ptr(l, slot,
351                                              struct btrfs_qgroup_status_item);
352
353                         if (btrfs_qgroup_status_version(l, ptr) !=
354                             BTRFS_QGROUP_STATUS_VERSION) {
355                                 btrfs_err(fs_info,
356                                  "old qgroup version, quota disabled");
357                                 goto out;
358                         }
359                         if (btrfs_qgroup_status_generation(l, ptr) !=
360                             fs_info->generation) {
361                                 flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
362                                 btrfs_err(fs_info,
363                                         "qgroup generation mismatch, marked as inconsistent");
364                         }
365                         fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
366                                                                           ptr);
367                         rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
368                         goto next1;
369                 }
370
371                 if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
372                     found_key.type != BTRFS_QGROUP_LIMIT_KEY)
373                         goto next1;
374
375                 qgroup = find_qgroup_rb(fs_info, found_key.offset);
376                 if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
377                     (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
378                         btrfs_err(fs_info, "inconsistent qgroup config");
379                         flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
380                 }
381                 if (!qgroup) {
382                         qgroup = add_qgroup_rb(fs_info, found_key.offset);
383                         if (IS_ERR(qgroup)) {
384                                 ret = PTR_ERR(qgroup);
385                                 goto out;
386                         }
387                 }
388                 switch (found_key.type) {
389                 case BTRFS_QGROUP_INFO_KEY: {
390                         struct btrfs_qgroup_info_item *ptr;
391
392                         ptr = btrfs_item_ptr(l, slot,
393                                              struct btrfs_qgroup_info_item);
394                         qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
395                         qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
396                         qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
397                         qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
398                         /* generation currently unused */
399                         break;
400                 }
401                 case BTRFS_QGROUP_LIMIT_KEY: {
402                         struct btrfs_qgroup_limit_item *ptr;
403
404                         ptr = btrfs_item_ptr(l, slot,
405                                              struct btrfs_qgroup_limit_item);
406                         qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
407                         qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
408                         qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
409                         qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
410                         qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
411                         break;
412                 }
413                 }
414 next1:
415                 ret = btrfs_next_item(quota_root, path);
416                 if (ret < 0)
417                         goto out;
418                 if (ret)
419                         break;
420         }
421         btrfs_release_path(path);
422
423         /*
424          * pass 2: read all qgroup relations
425          */
426         key.objectid = 0;
427         key.type = BTRFS_QGROUP_RELATION_KEY;
428         key.offset = 0;
429         ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
430         if (ret)
431                 goto out;
432         while (1) {
433                 slot = path->slots[0];
434                 l = path->nodes[0];
435                 btrfs_item_key_to_cpu(l, &found_key, slot);
436
437                 if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
438                         goto next2;
439
440                 if (found_key.objectid > found_key.offset) {
441                         /* parent <- member, not needed to build config */
442                         /* FIXME should we omit the key completely? */
443                         goto next2;
444                 }
445
446                 ret = add_relation_rb(fs_info, found_key.objectid,
447                                       found_key.offset);
448                 if (ret == -ENOENT) {
449                         btrfs_warn(fs_info,
450                                 "orphan qgroup relation 0x%llx->0x%llx",
451                                 found_key.objectid, found_key.offset);
452                         ret = 0;        /* ignore the error */
453                 }
454                 if (ret)
455                         goto out;
456 next2:
457                 ret = btrfs_next_item(quota_root, path);
458                 if (ret < 0)
459                         goto out;
460                 if (ret)
461                         break;
462         }
463 out:
464         fs_info->qgroup_flags |= flags;
465         if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
466                 clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
467         else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
468                  ret >= 0)
469                 ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
470         btrfs_free_path(path);
471
472         if (ret < 0) {
473                 ulist_free(fs_info->qgroup_ulist);
474                 fs_info->qgroup_ulist = NULL;
475                 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
476         }
477
478         return ret < 0 ? ret : 0;
479 }
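
/*
 * A sketch of what the two passes of btrfs_read_qgroup_config() above
 * read for a filesystem with qgroups 0/257 and 1/100: pass 1 sees the
 * status item (0, QGROUP_STATUS, 0) plus one info and one limit item
 * per qgroup, keyed (0, QGROUP_INFO, qgroupid) and
 * (0, QGROUP_LIMIT, qgroupid).  Pass 2 then reads the relation items.
 * Doing relations last guarantees that both rbtree nodes already exist
 * when add_relation_rb() runs; a relation whose qgroups are missing is
 * logged as an orphan and skipped.
 */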
480
481 /*
482  * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
483  * the first two are in single-threaded paths. And for the third one, we have
484  * set quota_root to NULL with qgroup_lock held beforehand, so it is safe to
485  * clean up the in-memory structures without holding qgroup_lock.
486  */
487 void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
488 {
489         struct rb_node *n;
490         struct btrfs_qgroup *qgroup;
491
492         while ((n = rb_first(&fs_info->qgroup_tree))) {
493                 qgroup = rb_entry(n, struct btrfs_qgroup, node);
494                 rb_erase(n, &fs_info->qgroup_tree);
495                 __del_qgroup_rb(qgroup);
496         }
497         /*
498          * we call btrfs_free_qgroup_config() both when unmounting the
499          * filesystem and when disabling quota, so we set qgroup_ulist
500          * to NULL here to avoid a double free.
501          */
502         ulist_free(fs_info->qgroup_ulist);
503         fs_info->qgroup_ulist = NULL;
504 }
505
506 static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
507                                     struct btrfs_root *quota_root,
508                                     u64 src, u64 dst)
509 {
510         int ret;
511         struct btrfs_path *path;
512         struct btrfs_key key;
513
514         path = btrfs_alloc_path();
515         if (!path)
516                 return -ENOMEM;
517
518         key.objectid = src;
519         key.type = BTRFS_QGROUP_RELATION_KEY;
520         key.offset = dst;
521
522         ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
523
524         btrfs_mark_buffer_dirty(path->nodes[0]);
525
526         btrfs_free_path(path);
527         return ret;
528 }
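
/*
 * Note the mirrored storage: btrfs_add_qgroup_relation() below calls
 * add_qgroup_relation_item() twice, once with
 * (src, BTRFS_QGROUP_RELATION_KEY, dst) and once with
 * (dst, BTRFS_QGROUP_RELATION_KEY, src), so a relation can be found
 * from either end.  btrfs_read_qgroup_config() above only consumes the
 * copy whose objectid is smaller than its offset.
 */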
529
530 static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
531                                     struct btrfs_root *quota_root,
532                                     u64 src, u64 dst)
533 {
534         int ret;
535         struct btrfs_path *path;
536         struct btrfs_key key;
537
538         path = btrfs_alloc_path();
539         if (!path)
540                 return -ENOMEM;
541
542         key.objectid = src;
543         key.type = BTRFS_QGROUP_RELATION_KEY;
544         key.offset = dst;
545
546         ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
547         if (ret < 0)
548                 goto out;
549
550         if (ret > 0) {
551                 ret = -ENOENT;
552                 goto out;
553         }
554
555         ret = btrfs_del_item(trans, quota_root, path);
556 out:
557         btrfs_free_path(path);
558         return ret;
559 }
560
561 static int add_qgroup_item(struct btrfs_trans_handle *trans,
562                            struct btrfs_root *quota_root, u64 qgroupid)
563 {
564         int ret;
565         struct btrfs_path *path;
566         struct btrfs_qgroup_info_item *qgroup_info;
567         struct btrfs_qgroup_limit_item *qgroup_limit;
568         struct extent_buffer *leaf;
569         struct btrfs_key key;
570
571         if (btrfs_is_testing(quota_root->fs_info))
572                 return 0;
573
574         path = btrfs_alloc_path();
575         if (!path)
576                 return -ENOMEM;
577
578         key.objectid = 0;
579         key.type = BTRFS_QGROUP_INFO_KEY;
580         key.offset = qgroupid;
581
582         /*
583          * Avoid a transaction abort by catching -EEXIST here. In that
584          * case, we proceed by re-initializing the existing structure
585          * on disk.
586          */
587
588         ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
589                                       sizeof(*qgroup_info));
590         if (ret && ret != -EEXIST)
591                 goto out;
592
593         leaf = path->nodes[0];
594         qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
595                                  struct btrfs_qgroup_info_item);
596         btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
597         btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
598         btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
599         btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
600         btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
601
602         btrfs_mark_buffer_dirty(leaf);
603
604         btrfs_release_path(path);
605
606         key.type = BTRFS_QGROUP_LIMIT_KEY;
607         ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
608                                       sizeof(*qgroup_limit));
609         if (ret && ret != -EEXIST)
610                 goto out;
611
612         leaf = path->nodes[0];
613         qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
614                                   struct btrfs_qgroup_limit_item);
615         btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
616         btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
617         btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
618         btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
619         btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
620
621         btrfs_mark_buffer_dirty(leaf);
622
623         ret = 0;
624 out:
625         btrfs_free_path(path);
626         return ret;
627 }
628
629 static int del_qgroup_item(struct btrfs_trans_handle *trans,
630                            struct btrfs_root *quota_root, u64 qgroupid)
631 {
632         int ret;
633         struct btrfs_path *path;
634         struct btrfs_key key;
635
636         path = btrfs_alloc_path();
637         if (!path)
638                 return -ENOMEM;
639
640         key.objectid = 0;
641         key.type = BTRFS_QGROUP_INFO_KEY;
642         key.offset = qgroupid;
643         ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
644         if (ret < 0)
645                 goto out;
646
647         if (ret > 0) {
648                 ret = -ENOENT;
649                 goto out;
650         }
651
652         ret = btrfs_del_item(trans, quota_root, path);
653         if (ret)
654                 goto out;
655
656         btrfs_release_path(path);
657
658         key.type = BTRFS_QGROUP_LIMIT_KEY;
659         ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
660         if (ret < 0)
661                 goto out;
662
663         if (ret > 0) {
664                 ret = -ENOENT;
665                 goto out;
666         }
667
668         ret = btrfs_del_item(trans, quota_root, path);
669
670 out:
671         btrfs_free_path(path);
672         return ret;
673 }
674
675 static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
676                                     struct btrfs_root *root,
677                                     struct btrfs_qgroup *qgroup)
678 {
679         struct btrfs_path *path;
680         struct btrfs_key key;
681         struct extent_buffer *l;
682         struct btrfs_qgroup_limit_item *qgroup_limit;
683         int ret;
684         int slot;
685
686         key.objectid = 0;
687         key.type = BTRFS_QGROUP_LIMIT_KEY;
688         key.offset = qgroup->qgroupid;
689
690         path = btrfs_alloc_path();
691         if (!path)
692                 return -ENOMEM;
693
694         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
695         if (ret > 0)
696                 ret = -ENOENT;
697
698         if (ret)
699                 goto out;
700
701         l = path->nodes[0];
702         slot = path->slots[0];
703         qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
704         btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
705         btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
706         btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
707         btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
708         btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);
709
710         btrfs_mark_buffer_dirty(l);
711
712 out:
713         btrfs_free_path(path);
714         return ret;
715 }
716
717 static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
718                                    struct btrfs_root *root,
719                                    struct btrfs_qgroup *qgroup)
720 {
721         struct btrfs_path *path;
722         struct btrfs_key key;
723         struct extent_buffer *l;
724         struct btrfs_qgroup_info_item *qgroup_info;
725         int ret;
726         int slot;
727
728         if (btrfs_is_testing(root->fs_info))
729                 return 0;
730
731         key.objectid = 0;
732         key.type = BTRFS_QGROUP_INFO_KEY;
733         key.offset = qgroup->qgroupid;
734
735         path = btrfs_alloc_path();
736         if (!path)
737                 return -ENOMEM;
738
739         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
740         if (ret > 0)
741                 ret = -ENOENT;
742
743         if (ret)
744                 goto out;
745
746         l = path->nodes[0];
747         slot = path->slots[0];
748         qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
749         btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
750         btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
751         btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
752         btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
753         btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
754
755         btrfs_mark_buffer_dirty(l);
756
757 out:
758         btrfs_free_path(path);
759         return ret;
760 }
761
762 static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
763                                      struct btrfs_fs_info *fs_info,
764                                     struct btrfs_root *root)
765 {
766         struct btrfs_path *path;
767         struct btrfs_key key;
768         struct extent_buffer *l;
769         struct btrfs_qgroup_status_item *ptr;
770         int ret;
771         int slot;
772
773         key.objectid = 0;
774         key.type = BTRFS_QGROUP_STATUS_KEY;
775         key.offset = 0;
776
777         path = btrfs_alloc_path();
778         if (!path)
779                 return -ENOMEM;
780
781         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
782         if (ret > 0)
783                 ret = -ENOENT;
784
785         if (ret)
786                 goto out;
787
788         l = path->nodes[0];
789         slot = path->slots[0];
790         ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
791         btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
792         btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
793         btrfs_set_qgroup_status_rescan(l, ptr,
794                                 fs_info->qgroup_rescan_progress.objectid);
795
796         btrfs_mark_buffer_dirty(l);
797
798 out:
799         btrfs_free_path(path);
800         return ret;
801 }
802
803 /*
804  * called with qgroup_lock held
805  */
806 static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
807                                   struct btrfs_root *root)
808 {
809         struct btrfs_path *path;
810         struct btrfs_key key;
811         struct extent_buffer *leaf = NULL;
812         int ret;
813         int nr = 0;
814
815         path = btrfs_alloc_path();
816         if (!path)
817                 return -ENOMEM;
818
819         path->leave_spinning = 1;
820
821         key.objectid = 0;
822         key.offset = 0;
823         key.type = 0;
824
825         while (1) {
826                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
827                 if (ret < 0)
828                         goto out;
829                 leaf = path->nodes[0];
830                 nr = btrfs_header_nritems(leaf);
831                 if (!nr)
832                         break;
833                 /*
834                  * delete the leaves one by one,
835                  * since the whole tree is going
836                  * to be deleted.
837                  */
838                 path->slots[0] = 0;
839                 ret = btrfs_del_items(trans, root, path, 0, nr);
840                 if (ret)
841                         goto out;
842
843                 btrfs_release_path(path);
844         }
845         ret = 0;
846 out:
847         set_bit(BTRFS_FS_QUOTA_DISABLING, &root->fs_info->flags);
848         btrfs_free_path(path);
849         return ret;
850 }
851
852 int btrfs_quota_enable(struct btrfs_trans_handle *trans,
853                        struct btrfs_fs_info *fs_info)
854 {
855         struct btrfs_root *quota_root;
856         struct btrfs_root *tree_root = fs_info->tree_root;
857         struct btrfs_path *path = NULL;
858         struct btrfs_qgroup_status_item *ptr;
859         struct extent_buffer *leaf;
860         struct btrfs_key key;
861         struct btrfs_key found_key;
862         struct btrfs_qgroup *qgroup = NULL;
863         int ret = 0;
864         int slot;
865
866         mutex_lock(&fs_info->qgroup_ioctl_lock);
867         if (fs_info->quota_root) {
868                 set_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags);
869                 goto out;
870         }
871
872         fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
873         if (!fs_info->qgroup_ulist) {
874                 ret = -ENOMEM;
875                 goto out;
876         }
877
878         /*
879          * initially create the quota tree
880          */
881         quota_root = btrfs_create_tree(trans, fs_info,
882                                        BTRFS_QUOTA_TREE_OBJECTID);
883         if (IS_ERR(quota_root)) {
884                 ret =  PTR_ERR(quota_root);
885                 goto out;
886         }
887
888         path = btrfs_alloc_path();
889         if (!path) {
890                 ret = -ENOMEM;
891                 goto out_free_root;
892         }
893
894         key.objectid = 0;
895         key.type = BTRFS_QGROUP_STATUS_KEY;
896         key.offset = 0;
897
898         ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
899                                       sizeof(*ptr));
900         if (ret)
901                 goto out_free_path;
902
903         leaf = path->nodes[0];
904         ptr = btrfs_item_ptr(leaf, path->slots[0],
905                                  struct btrfs_qgroup_status_item);
906         btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
907         btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
908         fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
909                                 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
910         btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
911         btrfs_set_qgroup_status_rescan(leaf, ptr, 0);
912
913         btrfs_mark_buffer_dirty(leaf);
914
915         key.objectid = 0;
916         key.type = BTRFS_ROOT_REF_KEY;
917         key.offset = 0;
918
919         btrfs_release_path(path);
920         ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
921         if (ret > 0)
922                 goto out_add_root;
923         if (ret < 0)
924                 goto out_free_path;
925
926
927         while (1) {
928                 slot = path->slots[0];
929                 leaf = path->nodes[0];
930                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
931
932                 if (found_key.type == BTRFS_ROOT_REF_KEY) {
933                         ret = add_qgroup_item(trans, quota_root,
934                                               found_key.offset);
935                         if (ret)
936                                 goto out_free_path;
937
938                         qgroup = add_qgroup_rb(fs_info, found_key.offset);
939                         if (IS_ERR(qgroup)) {
940                                 ret = PTR_ERR(qgroup);
941                                 goto out_free_path;
942                         }
943                 }
944                 ret = btrfs_next_item(tree_root, path);
945                 if (ret < 0)
946                         goto out_free_path;
947                 if (ret)
948                         break;
949         }
950
951 out_add_root:
952         btrfs_release_path(path);
953         ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
954         if (ret)
955                 goto out_free_path;
956
957         qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
958         if (IS_ERR(qgroup)) {
959                 ret = PTR_ERR(qgroup);
960                 goto out_free_path;
961         }
962         spin_lock(&fs_info->qgroup_lock);
963         fs_info->quota_root = quota_root;
964         set_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags);
965         spin_unlock(&fs_info->qgroup_lock);
966 out_free_path:
967         btrfs_free_path(path);
968 out_free_root:
969         if (ret) {
970                 free_extent_buffer(quota_root->node);
971                 free_extent_buffer(quota_root->commit_root);
972                 kfree(quota_root);
973         }
974 out:
975         if (ret) {
976                 ulist_free(fs_info->qgroup_ulist);
977                 fs_info->qgroup_ulist = NULL;
978         }
979         mutex_unlock(&fs_info->qgroup_ioctl_lock);
980         return ret;
981 }
982
983 int btrfs_quota_disable(struct btrfs_trans_handle *trans,
984                         struct btrfs_fs_info *fs_info)
985 {
986         struct btrfs_root *tree_root = fs_info->tree_root;
987         struct btrfs_root *quota_root;
988         int ret = 0;
989
990         mutex_lock(&fs_info->qgroup_ioctl_lock);
991         if (!fs_info->quota_root)
992                 goto out;
993         clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
994         set_bit(BTRFS_FS_QUOTA_DISABLING, &fs_info->flags);
995         btrfs_qgroup_wait_for_completion(fs_info, false);
996         spin_lock(&fs_info->qgroup_lock);
997         quota_root = fs_info->quota_root;
998         fs_info->quota_root = NULL;
999         fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
1000         spin_unlock(&fs_info->qgroup_lock);
1001
1002         btrfs_free_qgroup_config(fs_info);
1003
1004         ret = btrfs_clean_quota_tree(trans, quota_root);
1005         if (ret)
1006                 goto out;
1007
1008         ret = btrfs_del_root(trans, tree_root, &quota_root->root_key);
1009         if (ret)
1010                 goto out;
1011
1012         list_del(&quota_root->dirty_list);
1013
1014         btrfs_tree_lock(quota_root->node);
1015         clean_tree_block(trans, tree_root->fs_info, quota_root->node);
1016         btrfs_tree_unlock(quota_root->node);
1017         btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);
1018
1019         free_extent_buffer(quota_root->node);
1020         free_extent_buffer(quota_root->commit_root);
1021         kfree(quota_root);
1022 out:
1023         mutex_unlock(&fs_info->qgroup_ioctl_lock);
1024         return ret;
1025 }
1026
1027 static void qgroup_dirty(struct btrfs_fs_info *fs_info,
1028                          struct btrfs_qgroup *qgroup)
1029 {
1030         if (list_empty(&qgroup->dirty))
1031                 list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
1032 }
1033
1034 /*
1035  * The easy accounting: if we are adding/removing the only ref for an extent,
1036  * then this qgroup and all of the parent qgroups get their reference and
1037  * exclusive counts adjusted.
1038  *
1039  * Caller should hold fs_info->qgroup_lock.
1040  */
1041 static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
1042                                     struct ulist *tmp, u64 ref_root,
1043                                     u64 num_bytes, int sign)
1044 {
1045         struct btrfs_qgroup *qgroup;
1046         struct btrfs_qgroup_list *glist;
1047         struct ulist_node *unode;
1048         struct ulist_iterator uiter;
1049         int ret = 0;
1050
1051         qgroup = find_qgroup_rb(fs_info, ref_root);
1052         if (!qgroup)
1053                 goto out;
1054
1055         qgroup->rfer += sign * num_bytes;
1056         qgroup->rfer_cmpr += sign * num_bytes;
1057
1058         WARN_ON(sign < 0 && qgroup->excl < num_bytes);
1059         qgroup->excl += sign * num_bytes;
1060         qgroup->excl_cmpr += sign * num_bytes;
1061         if (sign > 0)
1062                 qgroup->reserved -= num_bytes;
1063
1064         qgroup_dirty(fs_info, qgroup);
1065
1066         /* Get all of the parent groups that contain this qgroup */
1067         list_for_each_entry(glist, &qgroup->groups, next_group) {
1068                 ret = ulist_add(tmp, glist->group->qgroupid,
1069                                 ptr_to_u64(glist->group), GFP_ATOMIC);
1070                 if (ret < 0)
1071                         goto out;
1072         }
1073
1074         /* Iterate all of the parents and adjust their reference counts */
1075         ULIST_ITER_INIT(&uiter);
1076         while ((unode = ulist_next(tmp, &uiter))) {
1077                 qgroup = u64_to_ptr(unode->aux);
1078                 qgroup->rfer += sign * num_bytes;
1079                 qgroup->rfer_cmpr += sign * num_bytes;
1080                 WARN_ON(sign < 0 && qgroup->excl < num_bytes);
1081                 qgroup->excl += sign * num_bytes;
1082                 if (sign > 0)
1083                         qgroup->reserved -= num_bytes;
1084                 qgroup->excl_cmpr += sign * num_bytes;
1085                 qgroup_dirty(fs_info, qgroup);
1086
1087                 /* Add any parents of the parents */
1088                 list_for_each_entry(glist, &qgroup->groups, next_group) {
1089                         ret = ulist_add(tmp, glist->group->qgroupid,
1090                                         ptr_to_u64(glist->group), GFP_ATOMIC);
1091                         if (ret < 0)
1092                                 goto out;
1093                 }
1094         }
1095         ret = 0;
1096 out:
1097         return ret;
1098 }
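
/*
 * Worked example for __qgroup_excl_accounting() above: assume qgroup
 * 0/257 is a member of 1/100 only, and an extent of 16KiB referenced
 * solely by subvolume 257 is added (sign == 1).  Then rfer, rfer_cmpr,
 * excl and excl_cmpr of both 0/257 and 1/100 grow by 16384 and both
 * qgroups are marked dirty.  The tmp ulist de-duplicates the upward
 * walk, so a grandparent reachable through two different parents is
 * still adjusted exactly once.
 */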
1099
1100
1101 /*
1102  * Quick path for updating a qgroup with only excl refs.
1103  *
1104  * In that case, just updating all parents will be enough.
1105  * Otherwise we need to do a full rescan.
1106  * Caller should also hold fs_info->qgroup_lock.
1107  *
1108  * Return 0 for a quick update, >0 when a full rescan is needed
1109  * (the INCONSISTENT flag is set in that case).
1110  * Return < 0 for other errors.
1111  */
1112 static int quick_update_accounting(struct btrfs_fs_info *fs_info,
1113                                    struct ulist *tmp, u64 src, u64 dst,
1114                                    int sign)
1115 {
1116         struct btrfs_qgroup *qgroup;
1117         int ret = 1;
1118         int err = 0;
1119
1120         qgroup = find_qgroup_rb(fs_info, src);
1121         if (!qgroup)
1122                 goto out;
1123         if (qgroup->excl == qgroup->rfer) {
1124                 ret = 0;
1125                 err = __qgroup_excl_accounting(fs_info, tmp, dst,
1126                                                qgroup->excl, sign);
1127                 if (err < 0) {
1128                         ret = err;
1129                         goto out;
1130                 }
1131         }
1132 out:
1133         if (ret)
1134                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1135         return ret;
1136 }
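
/*
 * Example of when the quick path above applies: if qgroup 0/257 has
 * rfer == excl (every byte it references is exclusive to it, e.g. a
 * never-snapshotted subvolume), then adding it to or removing it from
 * a parent simply moves qgroup->excl bytes up or down the hierarchy.
 * Once rfer != excl, some extents are shared and their ownership can't
 * be decided without walking backrefs, so the qgroups are marked
 * inconsistent until a rescan.
 */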
1137
1138 int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
1139                               struct btrfs_fs_info *fs_info, u64 src, u64 dst)
1140 {
1141         struct btrfs_root *quota_root;
1142         struct btrfs_qgroup *parent;
1143         struct btrfs_qgroup *member;
1144         struct btrfs_qgroup_list *list;
1145         struct ulist *tmp;
1146         int ret = 0;
1147
1148         /* Check the level of src and dst first */
1149         if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
1150                 return -EINVAL;
1151
1152         tmp = ulist_alloc(GFP_NOFS);
1153         if (!tmp)
1154                 return -ENOMEM;
1155
1156         mutex_lock(&fs_info->qgroup_ioctl_lock);
1157         quota_root = fs_info->quota_root;
1158         if (!quota_root) {
1159                 ret = -EINVAL;
1160                 goto out;
1161         }
1162         member = find_qgroup_rb(fs_info, src);
1163         parent = find_qgroup_rb(fs_info, dst);
1164         if (!member || !parent) {
1165                 ret = -EINVAL;
1166                 goto out;
1167         }
1168
1169         /* first check whether such a qgroup relation already exists */
1170         list_for_each_entry(list, &member->groups, next_group) {
1171                 if (list->group == parent) {
1172                         ret = -EEXIST;
1173                         goto out;
1174                 }
1175         }
1176
1177         ret = add_qgroup_relation_item(trans, quota_root, src, dst);
1178         if (ret)
1179                 goto out;
1180
1181         ret = add_qgroup_relation_item(trans, quota_root, dst, src);
1182         if (ret) {
1183                 del_qgroup_relation_item(trans, quota_root, src, dst);
1184                 goto out;
1185         }
1186
1187         spin_lock(&fs_info->qgroup_lock);
1188         ret = add_relation_rb(quota_root->fs_info, src, dst);
1189         if (ret < 0) {
1190                 spin_unlock(&fs_info->qgroup_lock);
1191                 goto out;
1192         }
1193         ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
1194         spin_unlock(&fs_info->qgroup_lock);
1195 out:
1196         mutex_unlock(&fs_info->qgroup_ioctl_lock);
1197         ulist_free(tmp);
1198         return ret;
1199 }
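
/*
 * Usage example for the level check in btrfs_add_qgroup_relation()
 * above: making 0/257 a member of 1/100 is allowed (level 0 < level 1),
 * while the reverse relation, or one between two qgroups of the same
 * level, fails with -EINVAL.  This keeps the hierarchy strictly
 * layered and therefore free of cycles.
 */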
1200
1201 static int __del_qgroup_relation(struct btrfs_trans_handle *trans,
1202                               struct btrfs_fs_info *fs_info, u64 src, u64 dst)
1203 {
1204         struct btrfs_root *quota_root;
1205         struct btrfs_qgroup *parent;
1206         struct btrfs_qgroup *member;
1207         struct btrfs_qgroup_list *list;
1208         struct ulist *tmp;
1209         int ret = 0;
1210         int err;
1211
1212         tmp = ulist_alloc(GFP_NOFS);
1213         if (!tmp)
1214                 return -ENOMEM;
1215
1216         quota_root = fs_info->quota_root;
1217         if (!quota_root) {
1218                 ret = -EINVAL;
1219                 goto out;
1220         }
1221
1222         member = find_qgroup_rb(fs_info, src);
1223         parent = find_qgroup_rb(fs_info, dst);
1224         if (!member || !parent) {
1225                 ret = -EINVAL;
1226                 goto out;
1227         }
1228
1229         /* first check whether such a qgroup relation already exists */
1230         list_for_each_entry(list, &member->groups, next_group) {
1231                 if (list->group == parent)
1232                         goto exist;
1233         }
1234         ret = -ENOENT;
1235         goto out;
1236 exist:
1237         ret = del_qgroup_relation_item(trans, quota_root, src, dst);
1238         err = del_qgroup_relation_item(trans, quota_root, dst, src);
1239         if (err && !ret)
1240                 ret = err;
1241
1242         spin_lock(&fs_info->qgroup_lock);
1243         del_relation_rb(fs_info, src, dst);
1244         ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
1245         spin_unlock(&fs_info->qgroup_lock);
1246 out:
1247         ulist_free(tmp);
1248         return ret;
1249 }
1250
1251 int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
1252                               struct btrfs_fs_info *fs_info, u64 src, u64 dst)
1253 {
1254         int ret = 0;
1255
1256         mutex_lock(&fs_info->qgroup_ioctl_lock);
1257         ret = __del_qgroup_relation(trans, fs_info, src, dst);
1258         mutex_unlock(&fs_info->qgroup_ioctl_lock);
1259
1260         return ret;
1261 }
1262
1263 int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
1264                         struct btrfs_fs_info *fs_info, u64 qgroupid)
1265 {
1266         struct btrfs_root *quota_root;
1267         struct btrfs_qgroup *qgroup;
1268         int ret = 0;
1269
1270         mutex_lock(&fs_info->qgroup_ioctl_lock);
1271         quota_root = fs_info->quota_root;
1272         if (!quota_root) {
1273                 ret = -EINVAL;
1274                 goto out;
1275         }
1276         qgroup = find_qgroup_rb(fs_info, qgroupid);
1277         if (qgroup) {
1278                 ret = -EEXIST;
1279                 goto out;
1280         }
1281
1282         ret = add_qgroup_item(trans, quota_root, qgroupid);
1283         if (ret)
1284                 goto out;
1285
1286         spin_lock(&fs_info->qgroup_lock);
1287         qgroup = add_qgroup_rb(fs_info, qgroupid);
1288         spin_unlock(&fs_info->qgroup_lock);
1289
1290         if (IS_ERR(qgroup))
1291                 ret = PTR_ERR(qgroup);
1292 out:
1293         mutex_unlock(&fs_info->qgroup_ioctl_lock);
1294         return ret;
1295 }
1296
1297 int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
1298                         struct btrfs_fs_info *fs_info, u64 qgroupid)
1299 {
1300         struct btrfs_root *quota_root;
1301         struct btrfs_qgroup *qgroup;
1302         struct btrfs_qgroup_list *list;
1303         int ret = 0;
1304
1305         mutex_lock(&fs_info->qgroup_ioctl_lock);
1306         quota_root = fs_info->quota_root;
1307         if (!quota_root) {
1308                 ret = -EINVAL;
1309                 goto out;
1310         }
1311
1312         qgroup = find_qgroup_rb(fs_info, qgroupid);
1313         if (!qgroup) {
1314                 ret = -ENOENT;
1315                 goto out;
1316         } else {
1317                 /* check that there are no children of this qgroup */
1318                 if (!list_empty(&qgroup->members)) {
1319                         ret = -EBUSY;
1320                         goto out;
1321                 }
1322         }
1323         ret = del_qgroup_item(trans, quota_root, qgroupid);
1324
1325         while (!list_empty(&qgroup->groups)) {
1326                 list = list_first_entry(&qgroup->groups,
1327                                         struct btrfs_qgroup_list, next_group);
1328                 ret = __del_qgroup_relation(trans, fs_info,
1329                                            qgroupid,
1330                                            list->group->qgroupid);
1331                 if (ret)
1332                         goto out;
1333         }
1334
1335         spin_lock(&fs_info->qgroup_lock);
1336         del_qgroup_rb(quota_root->fs_info, qgroupid);
1337         spin_unlock(&fs_info->qgroup_lock);
1338 out:
1339         mutex_unlock(&fs_info->qgroup_ioctl_lock);
1340         return ret;
1341 }
1342
1343 int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
1344                        struct btrfs_fs_info *fs_info, u64 qgroupid,
1345                        struct btrfs_qgroup_limit *limit)
1346 {
1347         struct btrfs_root *quota_root;
1348         struct btrfs_qgroup *qgroup;
1349         int ret = 0;
1350         /* Sometimes we want to clear the limit on this qgroup.
1351          * To meet this requirement, we treat -1 as a special value
1352          * which tells the kernel to clear the limit on this qgroup.
1353          */
1354         const u64 CLEAR_VALUE = -1;
1355
1356         mutex_lock(&fs_info->qgroup_ioctl_lock);
1357         quota_root = fs_info->quota_root;
1358         if (!quota_root) {
1359                 ret = -EINVAL;
1360                 goto out;
1361         }
1362
1363         qgroup = find_qgroup_rb(fs_info, qgroupid);
1364         if (!qgroup) {
1365                 ret = -ENOENT;
1366                 goto out;
1367         }
1368
1369         spin_lock(&fs_info->qgroup_lock);
1370         if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
1371                 if (limit->max_rfer == CLEAR_VALUE) {
1372                         qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1373                         limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1374                         qgroup->max_rfer = 0;
1375                 } else {
1376                         qgroup->max_rfer = limit->max_rfer;
1377                 }
1378         }
1379         if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
1380                 if (limit->max_excl == CLEAR_VALUE) {
1381                         qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1382                         limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1383                         qgroup->max_excl = 0;
1384                 } else {
1385                         qgroup->max_excl = limit->max_excl;
1386                 }
1387         }
1388         if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
1389                 if (limit->rsv_rfer == CLEAR_VALUE) {
1390                         qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1391                         limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1392                         qgroup->rsv_rfer = 0;
1393                 } else {
1394                         qgroup->rsv_rfer = limit->rsv_rfer;
1395                 }
1396         }
1397         if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
1398                 if (limit->rsv_excl == CLEAR_VALUE) {
1399                         qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1400                         limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1401                         qgroup->rsv_excl = 0;
1402                 } else {
1403                         qgroup->rsv_excl = limit->rsv_excl;
1404                 }
1405         }
1406         qgroup->lim_flags |= limit->flags;
1407
1408         spin_unlock(&fs_info->qgroup_lock);
1409
1410         ret = update_qgroup_limit_item(trans, quota_root, qgroup);
1411         if (ret) {
1412                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1413                 btrfs_info(fs_info, "unable to update quota limit for %llu",
1414                        qgroupid);
1415         }
1416
1417 out:
1418         mutex_unlock(&fs_info->qgroup_ioctl_lock);
1419         return ret;
1420 }
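
/*
 * Example of the CLEAR_VALUE convention in btrfs_limit_qgroup() above:
 * passing a limit with flags == BTRFS_QGROUP_LIMIT_MAX_RFER and
 * max_rfer == (u64)-1 drops the referenced-size limit entirely (the
 * bit is also cleared from lim_flags), while any other max_rfer value
 * sets that limit.
 */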
1421
1422 int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
1423                                          struct btrfs_fs_info *fs_info)
1424 {
1425         struct btrfs_qgroup_extent_record *record;
1426         struct btrfs_delayed_ref_root *delayed_refs;
1427         struct rb_node *node;
1428         u64 qgroup_to_skip;
1429         int ret = 0;
1430
1431         delayed_refs = &trans->transaction->delayed_refs;
1432         qgroup_to_skip = delayed_refs->qgroup_to_skip;
1433
1434         /*
1435          * No need to take the lock, since this function will only be called
1436          * from btrfs_commit_transaction().
1437          */
1438         node = rb_first(&delayed_refs->dirty_extent_root);
1439         while (node) {
1440                 record = rb_entry(node, struct btrfs_qgroup_extent_record,
1441                                   node);
1442                 ret = btrfs_find_all_roots(NULL, fs_info, record->bytenr, 0,
1443                                            &record->old_roots);
1444                 if (ret < 0)
1445                         break;
1446                 if (qgroup_to_skip)
1447                         ulist_del(record->old_roots, qgroup_to_skip, 0);
1448                 node = rb_next(node);
1449         }
1450         return ret;
1451 }
1452
1453 int btrfs_qgroup_insert_dirty_extent_nolock(struct btrfs_fs_info *fs_info,
1454                                 struct btrfs_delayed_ref_root *delayed_refs,
1455                                 struct btrfs_qgroup_extent_record *record)
1456 {
1457         struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
1458         struct rb_node *parent_node = NULL;
1459         struct btrfs_qgroup_extent_record *entry;
1460         u64 bytenr = record->bytenr;
1461
1462         assert_spin_locked(&delayed_refs->lock);
1463         trace_btrfs_qgroup_insert_dirty_extent(fs_info, record);
1464
1465         while (*p) {
1466                 parent_node = *p;
1467                 entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
1468                                  node);
1469                 if (bytenr < entry->bytenr)
1470                         p = &(*p)->rb_left;
1471                 else if (bytenr > entry->bytenr)
1472                         p = &(*p)->rb_right;
1473                 else
1474                         return 1;
1475         }
1476
1477         rb_link_node(&record->node, parent_node, p);
1478         rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
1479         return 0;
1480 }
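
/*
 * Note the return convention of the nolock helper above: 1 means a
 * record for this bytenr was already queued, and the caller keeps
 * ownership of (and must free) its own record, as
 * btrfs_qgroup_insert_dirty_extent() below does; 0 means the record
 * was linked into dirty_extent_root and now belongs to the tree.
 */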
1481
1482 int btrfs_qgroup_insert_dirty_extent(struct btrfs_trans_handle *trans,
1483                 struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
1484                 gfp_t gfp_flag)
1485 {
1486         struct btrfs_qgroup_extent_record *record;
1487         struct btrfs_delayed_ref_root *delayed_refs;
1488         int ret;
1489
1490         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
1491             || bytenr == 0 || num_bytes == 0)
1492                 return 0;
1493         if (WARN_ON(trans == NULL))
1494                 return -EINVAL;
1495         record = kmalloc(sizeof(*record), gfp_flag);
1496         if (!record)
1497                 return -ENOMEM;
1498
1499         delayed_refs = &trans->transaction->delayed_refs;
1500         record->bytenr = bytenr;
1501         record->num_bytes = num_bytes;
1502         record->old_roots = NULL;
1503
1504         spin_lock(&delayed_refs->lock);
1505         ret = btrfs_qgroup_insert_dirty_extent_nolock(fs_info, delayed_refs,
1506                                                       record);
1507         spin_unlock(&delayed_refs->lock);
1508         if (ret > 0)
1509                 kfree(record);
1510         return 0;
1511 }
1512
1513 #define UPDATE_NEW      0
1514 #define UPDATE_OLD      1
1515 /*
1516  * Walk all of the roots that point to the bytenr and adjust their refcnts.
1517  */
1518 static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
1519                                 struct ulist *roots, struct ulist *tmp,
1520                                 struct ulist *qgroups, u64 seq, int update_old)
1521 {
1522         struct ulist_node *unode;
1523         struct ulist_iterator uiter;
1524         struct ulist_node *tmp_unode;
1525         struct ulist_iterator tmp_uiter;
1526         struct btrfs_qgroup *qg;
1527         int ret = 0;
1528
1529         if (!roots)
1530                 return 0;
1531         ULIST_ITER_INIT(&uiter);
1532         while ((unode = ulist_next(roots, &uiter))) {
1533                 qg = find_qgroup_rb(fs_info, unode->val);
1534                 if (!qg)
1535                         continue;
1536
1537                 ulist_reinit(tmp);
1538                 ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
1539                                 GFP_ATOMIC);
1540                 if (ret < 0)
1541                         return ret;
1542                 ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg), GFP_ATOMIC);
1543                 if (ret < 0)
1544                         return ret;
1545                 ULIST_ITER_INIT(&tmp_uiter);
1546                 while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
1547                         struct btrfs_qgroup_list *glist;
1548
1549                         qg = u64_to_ptr(tmp_unode->aux);
1550                         if (update_old)
1551                                 btrfs_qgroup_update_old_refcnt(qg, seq, 1);
1552                         else
1553                                 btrfs_qgroup_update_new_refcnt(qg, seq, 1);
1554                         list_for_each_entry(glist, &qg->groups, next_group) {
1555                                 ret = ulist_add(qgroups, glist->group->qgroupid,
1556                                                 ptr_to_u64(glist->group),
1557                                                 GFP_ATOMIC);
1558                                 if (ret < 0)
1559                                         return ret;
1560                                 ret = ulist_add(tmp, glist->group->qgroupid,
1561                                                 ptr_to_u64(glist->group),
1562                                                 GFP_ATOMIC);
1563                                 if (ret < 0)
1564                                         return ret;
1565                         }
1566                 }
1567         }
1568         return 0;
1569 }
1570
1571 /*
1572  * Update qgroup rfer/excl counters.
1573  * The rfer update is easy; the code explains itself.
1574  *
1575  * The excl update is tricky, so it is split into two parts.
1576  * Part 1: Possible exclusive <-> sharing detection:
1577  *      |       A       |       !A      |
1578  *  -------------------------------------
1579  *  B   |       *       |       -       |
1580  *  -------------------------------------
1581  *  !B  |       +       |       **      |
1582  *  -------------------------------------
1583  *
1584  * Conditions:
1585  * A:   cur_old_roots < nr_old_roots    (not exclusive before)
1586  * !A:  cur_old_roots == nr_old_roots   (possibly exclusive before)
1587  * B:   cur_new_roots < nr_new_roots    (not exclusive now)
1588  * !B:  cur_new_roots == nr_new_roots   (possibly exclusive now)
1589  *
1590  * Results:
1591  * +: Possibly sharing -> exclusive     -: Possibly exclusive -> sharing
1592  * *: Definitely not changed.           **: Possibly unchanged.
1593  *
1594  * For the !A and !B conditions, the exception is the
1595  * cur_old/new_roots == 0 case.
1596  *
1597  * To make the logic clear, we first use conditions A and B to split the
1598  * combinations into 4 results.
1599  *
1600  * Then, for the "+" and "-" results, check the old/new_roots == 0 case,
1601  * as only one of the two variables may be 0 there.
1602  *
1603  * Lastly, for result **, where both variables may be 0, split it again
1604  * (2x2). This time nothing else needs to be considered, so the code and
1605  * logic are easy to follow.
1606  */
1607 static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
1608                                   struct ulist *qgroups,
1609                                   u64 nr_old_roots,
1610                                   u64 nr_new_roots,
1611                                   u64 num_bytes, u64 seq)
1612 {
1613         struct ulist_node *unode;
1614         struct ulist_iterator uiter;
1615         struct btrfs_qgroup *qg;
1616         u64 cur_new_count, cur_old_count;
1617
1618         ULIST_ITER_INIT(&uiter);
1619         while ((unode = ulist_next(qgroups, &uiter))) {
1620                 bool dirty = false;
1621
1622                 qg = u64_to_ptr(unode->aux);
1623                 cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
1624                 cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
1625
1626                 trace_qgroup_update_counters(fs_info, qg->qgroupid,
1627                                              cur_old_count, cur_new_count);
1628
1629                 /* Rfer update part */
1630                 if (cur_old_count == 0 && cur_new_count > 0) {
1631                         qg->rfer += num_bytes;
1632                         qg->rfer_cmpr += num_bytes;
1633                         dirty = true;
1634                 }
1635                 if (cur_old_count > 0 && cur_new_count == 0) {
1636                         qg->rfer -= num_bytes;
1637                         qg->rfer_cmpr -= num_bytes;
1638                         dirty = true;
1639                 }
1640
1641                 /* Excl update part */
1642                 /* Exclusive/none -> shared case */
1643                 if (cur_old_count == nr_old_roots &&
1644                     cur_new_count < nr_new_roots) {
1645                         /* Exclusive -> shared */
1646                         if (cur_old_count != 0) {
1647                                 qg->excl -= num_bytes;
1648                                 qg->excl_cmpr -= num_bytes;
1649                                 dirty = true;
1650                         }
1651                 }
1652
1653                 /* Shared -> exclusive/none case */
1654                 if (cur_old_count < nr_old_roots &&
1655                     cur_new_count == nr_new_roots) {
1656                         /* Shared->exclusive */
1657                         if (cur_new_count != 0) {
1658                                 qg->excl += num_bytes;
1659                                 qg->excl_cmpr += num_bytes;
1660                                 dirty = true;
1661                         }
1662                 }
1663
1664                 /* Exclusive/none -> exclusive/none case */
1665                 if (cur_old_count == nr_old_roots &&
1666                     cur_new_count == nr_new_roots) {
1667                         if (cur_old_count == 0) {
1668                                 /* None -> exclusive/none */
1669
1670                                 if (cur_new_count != 0) {
1671                                         /* None -> exclusive */
1672                                         qg->excl += num_bytes;
1673                                         qg->excl_cmpr += num_bytes;
1674                                         dirty = true;
1675                                 }
1676                                 /* None -> none, nothing changed */
1677                         } else {
1678                                 /* Exclusive -> exclusive/none */
1679
1680                                 if (cur_new_count == 0) {
1681                                         /* Exclusive -> none */
1682                                         qg->excl -= num_bytes;
1683                                         qg->excl_cmpr -= num_bytes;
1684                                         dirty = true;
1685                                 }
1686                                 /* Exclusive -> exclusive, nothing changed */
1687                         }
1688                 }
1689
1690                 if (dirty)
1691                         qgroup_dirty(fs_info, qg);
1692         }
1693         return 0;
1694 }
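
/*
 * Editor's note: a worked example of the table above, using assumed
 * numbers. Say a 16KiB extent was referenced by roots A, B and C before
 * the change and only by C afterwards (nr_old_roots == 3,
 * nr_new_roots == 1), and all three level-0 qgroups are members of a
 * parent qgroup 1/100:
 *
 *  - qgroups of A and B: cur_old_count == 1, cur_new_count == 0, so rfer
 *    drops by 16KiB; no excl branch matches and excl is unchanged (the
 *    extent was never exclusive to them).
 *  - qgroup of C: cur_old_count == 1 < 3, cur_new_count == 1 == 1, so the
 *    "shared -> exclusive" branch fires and excl grows by 16KiB.
 *  - qgroup 1/100: cur_old_count == 3 == 3 and cur_new_count == 1 == 1,
 *    so the last branch matches with both counts nonzero and nothing
 *    changes: the extent stays exclusive to the parent group.
 */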
1695
1696 int
1697 btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
1698                             struct btrfs_fs_info *fs_info,
1699                             u64 bytenr, u64 num_bytes,
1700                             struct ulist *old_roots, struct ulist *new_roots)
1701 {
1702         struct ulist *qgroups = NULL;
1703         struct ulist *tmp = NULL;
1704         u64 seq;
1705         u64 nr_new_roots = 0;
1706         u64 nr_old_roots = 0;
1707         int ret = 0;
1708
1709         if (new_roots)
1710                 nr_new_roots = new_roots->nnodes;
1711         if (old_roots)
1712                 nr_old_roots = old_roots->nnodes;
1713
1714         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
1715                 goto out_free;
1716         BUG_ON(!fs_info->quota_root);
1717
1718         trace_btrfs_qgroup_account_extent(fs_info, bytenr, num_bytes,
1719                                           nr_old_roots, nr_new_roots);
1720
1721         qgroups = ulist_alloc(GFP_NOFS);
1722         if (!qgroups) {
1723                 ret = -ENOMEM;
1724                 goto out_free;
1725         }
1726         tmp = ulist_alloc(GFP_NOFS);
1727         if (!tmp) {
1728                 ret = -ENOMEM;
1729                 goto out_free;
1730         }
1731
1732         mutex_lock(&fs_info->qgroup_rescan_lock);
1733         if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
1734                 if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
1735                         mutex_unlock(&fs_info->qgroup_rescan_lock);
1736                         ret = 0;
1737                         goto out_free;
1738                 }
1739         }
1740         mutex_unlock(&fs_info->qgroup_rescan_lock);
1741
1742         spin_lock(&fs_info->qgroup_lock);
1743         seq = fs_info->qgroup_seq;
1744
1745         /* Update old refcnts using old_roots */
1746         ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq,
1747                                    UPDATE_OLD);
1748         if (ret < 0)
1749                 goto out;
1750
1751         /* Update new refcnts using new_roots */
1752         ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq,
1753                                    UPDATE_NEW);
1754         if (ret < 0)
1755                 goto out;
1756
1757         qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots,
1758                                num_bytes, seq);
1759
1760         /*
1761          * Bump qgroup_seq to avoid seq overlap
1762          */
1763         fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
1764 out:
1765         spin_unlock(&fs_info->qgroup_lock);
1766 out_free:
1767         ulist_free(tmp);
1768         ulist_free(qgroups);
1769         ulist_free(old_roots);
1770         ulist_free(new_roots);
1771         return ret;
1772 }
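
/*
 * Editor's note: a sketch of driving btrfs_qgroup_account_extent()
 * directly, under the assumption of a caller that already holds the old
 * roots. Note that the function above always consumes (frees) both root
 * ulists, even on its early-exit paths, so the caller must not free them
 * again. The helper below is hypothetical.
 */
static int __maybe_unused example_account_one_extent(
		struct btrfs_trans_handle *trans,
		struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
		struct ulist *old_roots)
{
	struct ulist *new_roots = NULL;
	int ret;

	/* (u64)-1 time_seq: search the current root, see the comment below */
	ret = btrfs_find_all_roots(trans, fs_info, bytenr, (u64)-1,
				   &new_roots);
	if (ret < 0) {
		ulist_free(old_roots);
		return ret;
	}
	/* from here on both ulists are owned and freed by the callee */
	return btrfs_qgroup_account_extent(trans, fs_info, bytenr, num_bytes,
					   old_roots, new_roots);
}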
1773
1774 int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans,
1775                                  struct btrfs_fs_info *fs_info)
1776 {
1777         struct btrfs_qgroup_extent_record *record;
1778         struct btrfs_delayed_ref_root *delayed_refs;
1779         struct ulist *new_roots = NULL;
1780         struct rb_node *node;
1781         u64 qgroup_to_skip;
1782         int ret = 0;
1783
1784         delayed_refs = &trans->transaction->delayed_refs;
1785         qgroup_to_skip = delayed_refs->qgroup_to_skip;
1786         while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
1787                 record = rb_entry(node, struct btrfs_qgroup_extent_record,
1788                                   node);
1789
1790                 trace_btrfs_qgroup_account_extents(fs_info, record);
1791
1792                 if (!ret) {
1793                         /*
1794                          * Use (u64)-1 as time_seq to do a special search, which
1795                          * doesn't lock the tree or delayed_refs and searches the
1796                          * current root. It's safe inside commit_transaction().
1797                          */
1798                         ret = btrfs_find_all_roots(trans, fs_info,
1799                                         record->bytenr, (u64)-1, &new_roots);
1800                         if (ret < 0)
1801                                 goto cleanup;
1802                         if (qgroup_to_skip)
1803                                 ulist_del(new_roots, qgroup_to_skip, 0);
1804                         ret = btrfs_qgroup_account_extent(trans, fs_info,
1805                                         record->bytenr, record->num_bytes,
1806                                         record->old_roots, new_roots);
1807                         record->old_roots = NULL;
1808                         new_roots = NULL;
1809                 }
1810 cleanup:
1811                 ulist_free(record->old_roots);
1812                 ulist_free(new_roots);
1813                 new_roots = NULL;
1814                 rb_erase(node, &delayed_refs->dirty_extent_root);
1815                 kfree(record);
1816
1817         }
1818         return ret;
1819 }
1820
1821 /*
1822  * called from commit_transaction. Writes all changed qgroups to disk.
1823  */
1824 int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
1825                       struct btrfs_fs_info *fs_info)
1826 {
1827         struct btrfs_root *quota_root = fs_info->quota_root;
1828         int ret = 0;
1829         int start_rescan_worker = 0;
1830
1831         if (!quota_root)
1832                 goto out;
1833
1834         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
1835             test_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags))
1836                 start_rescan_worker = 1;
1837
1838         if (test_and_clear_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags))
1839                 set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1840         if (test_and_clear_bit(BTRFS_FS_QUOTA_DISABLING, &fs_info->flags))
1841                 clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1842
1843         spin_lock(&fs_info->qgroup_lock);
1844         while (!list_empty(&fs_info->dirty_qgroups)) {
1845                 struct btrfs_qgroup *qgroup;
1846                 qgroup = list_first_entry(&fs_info->dirty_qgroups,
1847                                           struct btrfs_qgroup, dirty);
1848                 list_del_init(&qgroup->dirty);
1849                 spin_unlock(&fs_info->qgroup_lock);
1850                 ret = update_qgroup_info_item(trans, quota_root, qgroup);
1851                 if (ret)
1852                         fs_info->qgroup_flags |=
1853                                         BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1854                 ret = update_qgroup_limit_item(trans, quota_root, qgroup);
1855                 if (ret)
1856                         fs_info->qgroup_flags |=
1857                                         BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1858                 spin_lock(&fs_info->qgroup_lock);
1859         }
1860         if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
1861                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
1862         else
1863                 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
1864         spin_unlock(&fs_info->qgroup_lock);
1865
1866         ret = update_qgroup_status_item(trans, fs_info, quota_root);
1867         if (ret)
1868                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1869
1870         if (!ret && start_rescan_worker) {
1871                 ret = qgroup_rescan_init(fs_info, 0, 1);
1872                 if (!ret) {
1873                         qgroup_rescan_zero_tracking(fs_info);
1874                         btrfs_queue_work(fs_info->qgroup_rescan_workers,
1875                                          &fs_info->qgroup_rescan_work);
1876                 }
1877                 ret = 0;
1878         }
1879
1880 out:
1882         return ret;
1883 }
1884
1885 /*
1886  * Copy the accounting information between qgroups. This is necessary
1887  * when a snapshot or a subvolume is created. Throwing an error will
1888  * cause a transaction abort so we take extra care here to only error
1889  * when a readonly fs is a reasonable outcome.
1890  */
1891 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
1892                          struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
1893                          struct btrfs_qgroup_inherit *inherit)
1894 {
1895         int ret = 0;
1896         int i;
1897         u64 *i_qgroups;
1898         struct btrfs_root *quota_root = fs_info->quota_root;
1899         struct btrfs_qgroup *srcgroup;
1900         struct btrfs_qgroup *dstgroup;
1901         u32 level_size = 0;
1902         u64 nums;
1903
1904         mutex_lock(&fs_info->qgroup_ioctl_lock);
1905         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
1906                 goto out;
1907
1908         if (!quota_root) {
1909                 ret = -EINVAL;
1910                 goto out;
1911         }
1912
1913         if (inherit) {
1914                 i_qgroups = (u64 *)(inherit + 1);
1915                 nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
1916                        2 * inherit->num_excl_copies;
1917                 for (i = 0; i < nums; ++i) {
1918                         srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
1919
1920                         /*
1921                          * Zero out invalid groups so we can ignore
1922                          * them later.
1923                          */
1924                         if (!srcgroup ||
1925                             ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
1926                                 *i_qgroups = 0ULL;
1927
1928                         ++i_qgroups;
1929                 }
1930         }
1931
1932         /*
1933          * create a tracking group for the subvol itself
1934          */
1935         ret = add_qgroup_item(trans, quota_root, objectid);
1936         if (ret)
1937                 goto out;
1938
1939         if (srcid) {
1940                 struct btrfs_root *srcroot;
1941                 struct btrfs_key srckey;
1942
1943                 srckey.objectid = srcid;
1944                 srckey.type = BTRFS_ROOT_ITEM_KEY;
1945                 srckey.offset = (u64)-1;
1946                 srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
1947                 if (IS_ERR(srcroot)) {
1948                         ret = PTR_ERR(srcroot);
1949                         goto out;
1950                 }
1951
1952                 rcu_read_lock();
1953                 level_size = srcroot->nodesize;
1954                 rcu_read_unlock();
1955         }
1956
1957         /*
1958          * add qgroup to all inherited groups
1959          */
1960         if (inherit) {
1961                 i_qgroups = (u64 *)(inherit + 1);
1962                 for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
1963                         if (*i_qgroups == 0)
1964                                 continue;
1965                         ret = add_qgroup_relation_item(trans, quota_root,
1966                                                        objectid, *i_qgroups);
1967                         if (ret && ret != -EEXIST)
1968                                 goto out;
1969                         ret = add_qgroup_relation_item(trans, quota_root,
1970                                                        *i_qgroups, objectid);
1971                         if (ret && ret != -EEXIST)
1972                                 goto out;
1973                 }
1974                 ret = 0;
1975         }
1976
1978         spin_lock(&fs_info->qgroup_lock);
1979
1980         dstgroup = add_qgroup_rb(fs_info, objectid);
1981         if (IS_ERR(dstgroup)) {
1982                 ret = PTR_ERR(dstgroup);
1983                 goto unlock;
1984         }
1985
1986         if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
1987                 dstgroup->lim_flags = inherit->lim.flags;
1988                 dstgroup->max_rfer = inherit->lim.max_rfer;
1989                 dstgroup->max_excl = inherit->lim.max_excl;
1990                 dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
1991                 dstgroup->rsv_excl = inherit->lim.rsv_excl;
1992
1993                 ret = update_qgroup_limit_item(trans, quota_root, dstgroup);
1994                 if (ret) {
1995                         fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1996                         btrfs_info(fs_info,
1997                                    "unable to update quota limit for %llu",
1998                                    dstgroup->qgroupid);
1999                         goto unlock;
2000                 }
2001         }
2002
2003         if (srcid) {
2004                 srcgroup = find_qgroup_rb(fs_info, srcid);
2005                 if (!srcgroup)
2006                         goto unlock;
2007
2008                 /*
2009                  * We call inherit after we clone the root in order to make sure
2010                  * our counts don't go crazy, so at this point the only
2011                  * difference between the two roots should be the root node.
2012                  */
2013                 dstgroup->rfer = srcgroup->rfer;
2014                 dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
2015                 dstgroup->excl = level_size;
2016                 dstgroup->excl_cmpr = level_size;
2017                 srcgroup->excl = level_size;
2018                 srcgroup->excl_cmpr = level_size;
2019
2020                 /* inherit the limit info */
2021                 dstgroup->lim_flags = srcgroup->lim_flags;
2022                 dstgroup->max_rfer = srcgroup->max_rfer;
2023                 dstgroup->max_excl = srcgroup->max_excl;
2024                 dstgroup->rsv_rfer = srcgroup->rsv_rfer;
2025                 dstgroup->rsv_excl = srcgroup->rsv_excl;
2026
2027                 qgroup_dirty(fs_info, dstgroup);
2028                 qgroup_dirty(fs_info, srcgroup);
2029         }
2030
2031         if (!inherit)
2032                 goto unlock;
2033
2034         i_qgroups = (u64 *)(inherit + 1);
2035         for (i = 0; i < inherit->num_qgroups; ++i) {
2036                 if (*i_qgroups) {
2037                         ret = add_relation_rb(quota_root->fs_info, objectid,
2038                                               *i_qgroups);
2039                         if (ret)
2040                                 goto unlock;
2041                 }
2042                 ++i_qgroups;
2043         }
2044
2045         for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
2046                 struct btrfs_qgroup *src;
2047                 struct btrfs_qgroup *dst;
2048
2049                 if (!i_qgroups[0] || !i_qgroups[1])
2050                         continue;
2051
2052                 src = find_qgroup_rb(fs_info, i_qgroups[0]);
2053                 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2054
2055                 if (!src || !dst) {
2056                         ret = -EINVAL;
2057                         goto unlock;
2058                 }
2059
2060                 dst->rfer = src->rfer - level_size;
2061                 dst->rfer_cmpr = src->rfer_cmpr - level_size;
2062         }
2063         for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
2064                 struct btrfs_qgroup *src;
2065                 struct btrfs_qgroup *dst;
2066
2067                 if (!i_qgroups[0] || !i_qgroups[1])
2068                         continue;
2069
2070                 src = find_qgroup_rb(fs_info, i_qgroups[0]);
2071                 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2072
2073                 if (!src || !dst) {
2074                         ret = -EINVAL;
2075                         goto unlock;
2076                 }
2077
2078                 dst->excl = src->excl + level_size;
2079                 dst->excl_cmpr = src->excl_cmpr + level_size;
2080         }
2081
2082 unlock:
2083         spin_unlock(&fs_info->qgroup_lock);
2084 out:
2085         mutex_unlock(&fs_info->qgroup_ioctl_lock);
2086         return ret;
2087 }
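
/*
 * Editor's note: layout of the trailing u64 array that
 * "(u64 *)(inherit + 1)" points at in the function above, from the ioctl's
 * struct btrfs_qgroup_inherit:
 *
 *   [0 .. num_qgroups-1]        qgroupids the new subvolume is added to
 *   [+ 2 * num_ref_copies]      (src, dst) pairs for the rfer copies
 *   [+ 2 * num_excl_copies]     (src, dst) pairs for the excl copies
 *
 * hence nums = num_qgroups + 2 * num_ref_copies + 2 * num_excl_copies.
 * A sketch of building such a struct (the qgroupid value is made up):
 */
static __maybe_unused struct btrfs_qgroup_inherit *example_build_inherit(void)
{
	struct btrfs_qgroup_inherit *inherit;
	u64 nums = 1;	/* one inherited qgroup, no copy pairs */

	inherit = kzalloc(sizeof(*inherit) + nums * sizeof(u64), GFP_KERNEL);
	if (!inherit)
		return NULL;
	inherit->num_qgroups = 1;
	/* level 1, id 100: the level lives in the high 16 bits */
	((u64 *)(inherit + 1))[0] = (1ULL << 48) | 100;
	return inherit;
}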
2088
2089 static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
2090 {
2091         struct btrfs_root *quota_root;
2092         struct btrfs_qgroup *qgroup;
2093         struct btrfs_fs_info *fs_info = root->fs_info;
2094         u64 ref_root = root->root_key.objectid;
2095         int ret = 0;
2096         struct ulist_node *unode;
2097         struct ulist_iterator uiter;
2098
2099         if (!is_fstree(ref_root))
2100                 return 0;
2101
2102         if (num_bytes == 0)
2103                 return 0;
2104
2105         spin_lock(&fs_info->qgroup_lock);
2106         quota_root = fs_info->quota_root;
2107         if (!quota_root)
2108                 goto out;
2109
2110         qgroup = find_qgroup_rb(fs_info, ref_root);
2111         if (!qgroup)
2112                 goto out;
2113
2114         /*
2115          * In the first step, check all affected qgroups to see whether
2116          * any limit would be exceeded.
2117          */
2118         ulist_reinit(fs_info->qgroup_ulist);
2119         ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
2120                         (uintptr_t)qgroup, GFP_ATOMIC);
2121         if (ret < 0)
2122                 goto out;
2123         ULIST_ITER_INIT(&uiter);
2124         while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2125                 struct btrfs_qgroup *qg;
2126                 struct btrfs_qgroup_list *glist;
2127
2128                 qg = u64_to_ptr(unode->aux);
2129
2130                 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
2131                     qg->reserved + (s64)qg->rfer + num_bytes >
2132                     qg->max_rfer) {
2133                         ret = -EDQUOT;
2134                         goto out;
2135                 }
2136
2137                 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
2138                     qg->reserved + (s64)qg->excl + num_bytes >
2139                     qg->max_excl) {
2140                         ret = -EDQUOT;
2141                         goto out;
2142                 }
2143
2144                 list_for_each_entry(glist, &qg->groups, next_group) {
2145                         ret = ulist_add(fs_info->qgroup_ulist,
2146                                         glist->group->qgroupid,
2147                                         (uintptr_t)glist->group, GFP_ATOMIC);
2148                         if (ret < 0)
2149                                 goto out;
2150                 }
2151         }
2152         ret = 0;
2153         /*
2154          * no limits exceeded, now record the reservation into all qgroups
2155          */
2156         ULIST_ITER_INIT(&uiter);
2157         while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2158                 struct btrfs_qgroup *qg;
2159
2160                 qg = u64_to_ptr(unode->aux);
2161
2162                 qg->reserved += num_bytes;
2163         }
2164
2165 out:
2166         spin_unlock(&fs_info->qgroup_lock);
2167         return ret;
2168 }
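
/*
 * Editor's note: a worked example with an assumed hierarchy. If subvolume
 * qgroup 0/257 is a member of 1/100 and 1/100 has max_rfer set, then
 * qgroup_reserve(root of 257, N) visits 0/257 and then 1/100 via the
 * groups lists. If reserved + rfer + N would exceed max_rfer (or the excl
 * equivalent) for any group in that closure, nothing is recorded anywhere
 * and -EDQUOT is returned; only when every group passes does the second
 * loop apply reserved += N to all of them.
 */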
2169
2170 void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
2171                                u64 ref_root, u64 num_bytes)
2172 {
2173         struct btrfs_root *quota_root;
2174         struct btrfs_qgroup *qgroup;
2175         struct ulist_node *unode;
2176         struct ulist_iterator uiter;
2177         int ret = 0;
2178
2179         if (!is_fstree(ref_root))
2180                 return;
2181
2182         if (num_bytes == 0)
2183                 return;
2184
2185         spin_lock(&fs_info->qgroup_lock);
2186
2187         quota_root = fs_info->quota_root;
2188         if (!quota_root)
2189                 goto out;
2190
2191         qgroup = find_qgroup_rb(fs_info, ref_root);
2192         if (!qgroup)
2193                 goto out;
2194
2195         ulist_reinit(fs_info->qgroup_ulist);
2196         ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
2197                         (uintptr_t)qgroup, GFP_ATOMIC);
2198         if (ret < 0)
2199                 goto out;
2200         ULIST_ITER_INIT(&uiter);
2201         while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2202                 struct btrfs_qgroup *qg;
2203                 struct btrfs_qgroup_list *glist;
2204
2205                 qg = u64_to_ptr(unode->aux);
2206
2207                 qg->reserved -= num_bytes;
2208
2209                 list_for_each_entry(glist, &qg->groups, next_group) {
2210                         ret = ulist_add(fs_info->qgroup_ulist,
2211                                         glist->group->qgroupid,
2212                                         (uintptr_t)glist->group, GFP_ATOMIC);
2213                         if (ret < 0)
2214                                 goto out;
2215                 }
2216         }
2217
2218 out:
2219         spin_unlock(&fs_info->qgroup_lock);
2220 }
2221
2222 static inline void qgroup_free(struct btrfs_root *root, u64 num_bytes)
2223 {
2224         return btrfs_qgroup_free_refroot(root->fs_info, root->objectid,
2225                                          num_bytes);
2226 }
2227 void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
2228 {
2229         if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
2230                 return;
2231         btrfs_err(trans->fs_info,
2232                 "qgroups not uptodate in trans handle %p:  list is%s empty, seq is %#x.%x",
2233                 trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
2234                 (u32)(trans->delayed_ref_elem.seq >> 32),
2235                 (u32)trans->delayed_ref_elem.seq);
2236         BUG();
2237 }
2238
2239 /*
2240  * returns < 0 on error, 0 when more leaves are to be scanned.
2241  * returns 1 when done.
2242  */
2243 static int
2244 qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
2245                    struct btrfs_trans_handle *trans)
2246 {
2247         struct btrfs_key found;
2248         struct extent_buffer *scratch_leaf = NULL;
2249         struct ulist *roots = NULL;
2250         struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
2251         u64 num_bytes;
2252         int slot;
2253         int ret;
2254
2255         mutex_lock(&fs_info->qgroup_rescan_lock);
2256         ret = btrfs_search_slot_for_read(fs_info->extent_root,
2257                                          &fs_info->qgroup_rescan_progress,
2258                                          path, 1, 0);
2259
2260         btrfs_debug(fs_info,
2261                 "current progress key (%llu %u %llu), search_slot ret %d",
2262                 fs_info->qgroup_rescan_progress.objectid,
2263                 fs_info->qgroup_rescan_progress.type,
2264                 fs_info->qgroup_rescan_progress.offset, ret);
2265
2266         if (ret) {
2267                 /*
2268                  * The rescan is about to end, we will not be scanning any
2269                  * further blocks. We cannot unset the RESCAN flag here, because
2270                  * we want to commit the transaction if everything went well.
2271                  * To make the live accounting work in this phase, we set our
2272                  * scan progress pointer such that every real extent objectid
2273                  * will be smaller.
2274                  */
2275                 fs_info->qgroup_rescan_progress.objectid = (u64)-1;
2276                 btrfs_release_path(path);
2277                 mutex_unlock(&fs_info->qgroup_rescan_lock);
2278                 return ret;
2279         }
2280
2281         btrfs_item_key_to_cpu(path->nodes[0], &found,
2282                               btrfs_header_nritems(path->nodes[0]) - 1);
2283         fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
2284
2285         btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
2286         scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
2287         if (!scratch_leaf) {
2288                 ret = -ENOMEM;
2289                 mutex_unlock(&fs_info->qgroup_rescan_lock);
2290                 goto out;
2291         }
2292         extent_buffer_get(scratch_leaf);
2293         btrfs_tree_read_lock(scratch_leaf);
2294         btrfs_set_lock_blocking_rw(scratch_leaf, BTRFS_READ_LOCK);
2295         slot = path->slots[0];
2296         btrfs_release_path(path);
2297         mutex_unlock(&fs_info->qgroup_rescan_lock);
2298
2299         for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
2300                 btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
2301                 if (found.type != BTRFS_EXTENT_ITEM_KEY &&
2302                     found.type != BTRFS_METADATA_ITEM_KEY)
2303                         continue;
2304                 if (found.type == BTRFS_METADATA_ITEM_KEY)
2305                         num_bytes = fs_info->extent_root->nodesize;
2306                 else
2307                         num_bytes = found.offset;
2308
2309                 ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
2310                                            &roots);
2311                 if (ret < 0)
2312                         goto out;
2313                 /* For rescan, just pass old_roots as NULL */
2314                 ret = btrfs_qgroup_account_extent(trans, fs_info,
2315                                 found.objectid, num_bytes, NULL, roots);
2316                 if (ret < 0)
2317                         goto out;
2318         }
2319 out:
2320         if (scratch_leaf) {
2321                 btrfs_tree_read_unlock_blocking(scratch_leaf);
2322                 free_extent_buffer(scratch_leaf);
2323         }
2324         btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
2325
2326         return ret;
2327 }
2328
2329 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
2330 {
2331         struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
2332                                                      qgroup_rescan_work);
2333         struct btrfs_path *path;
2334         struct btrfs_trans_handle *trans = NULL;
2335         int err = -ENOMEM;
2336         int ret = 0;
2337
2338         mutex_lock(&fs_info->qgroup_rescan_lock);
2339         fs_info->qgroup_rescan_running = true;
2340         mutex_unlock(&fs_info->qgroup_rescan_lock);
2341
2342         path = btrfs_alloc_path();
2343         if (!path)
2344                 goto out;
2345
2346         err = 0;
2347         while (!err && !btrfs_fs_closing(fs_info)) {
2348                 trans = btrfs_start_transaction(fs_info->fs_root, 0);
2349                 if (IS_ERR(trans)) {
2350                         err = PTR_ERR(trans);
2351                         break;
2352                 }
2353                 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
2354                         err = -EINTR;
2355                 } else {
2356                         err = qgroup_rescan_leaf(fs_info, path, trans);
2357                 }
2358                 if (err > 0)
2359                         btrfs_commit_transaction(trans, fs_info->fs_root);
2360                 else
2361                         btrfs_end_transaction(trans, fs_info->fs_root);
2362         }
2363
2364 out:
2365         btrfs_free_path(path);
2366
2367         mutex_lock(&fs_info->qgroup_rescan_lock);
2368         if (!btrfs_fs_closing(fs_info))
2369                 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2370
2371         if (err > 0 &&
2372             fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
2373                 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2374         } else if (err < 0) {
2375                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2376         }
2377         mutex_unlock(&fs_info->qgroup_rescan_lock);
2378
2379         /*
2380          * only update status, since the previous part has already updated the
2381          * qgroup info.
2382          */
2383         trans = btrfs_start_transaction(fs_info->quota_root, 1);
2384         if (IS_ERR(trans)) {
2385                 err = PTR_ERR(trans);
2386                 btrfs_err(fs_info,
2387                           "failed to start transaction for status update: %d",
2388                           err);
2389                 goto done;
2390         }
2391         ret = update_qgroup_status_item(trans, fs_info, fs_info->quota_root);
2392         if (ret < 0) {
2393                 err = ret;
2394                 btrfs_err(fs_info, "failed to update qgroup status: %d", err);
2395         }
2396         btrfs_end_transaction(trans, fs_info->quota_root);
2397
2398         if (btrfs_fs_closing(fs_info)) {
2399                 btrfs_info(fs_info, "qgroup scan paused");
2400         } else if (err >= 0) {
2401                 btrfs_info(fs_info, "qgroup scan completed%s",
2402                         err > 0 ? " (inconsistency flag cleared)" : "");
2403         } else {
2404                 btrfs_err(fs_info, "qgroup scan failed with %d", err);
2405         }
2406
2407 done:
2408         mutex_lock(&fs_info->qgroup_rescan_lock);
2409         fs_info->qgroup_rescan_running = false;
2410         mutex_unlock(&fs_info->qgroup_rescan_lock);
2411         complete_all(&fs_info->qgroup_rescan_completion);
2412 }
2413
2414 /*
2415  * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
2416  * memory required for the rescan context.
2417  */
2418 static int
2419 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
2420                    int init_flags)
2421 {
2422         int ret = 0;
2423
2424         if (!init_flags &&
2425             (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) ||
2426              !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))) {
2427                 ret = -EINVAL;
2428                 goto err;
2429         }
2430
2431         mutex_lock(&fs_info->qgroup_rescan_lock);
2432         spin_lock(&fs_info->qgroup_lock);
2433
2434         if (init_flags) {
2435                 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
2436                         ret = -EINPROGRESS;
2437                 else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
2438                         ret = -EINVAL;
2439
2440                 if (ret) {
2441                         spin_unlock(&fs_info->qgroup_lock);
2442                         mutex_unlock(&fs_info->qgroup_rescan_lock);
2443                         goto err;
2444                 }
2445                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2446         }
2447
2448         memset(&fs_info->qgroup_rescan_progress, 0,
2449                 sizeof(fs_info->qgroup_rescan_progress));
2450         fs_info->qgroup_rescan_progress.objectid = progress_objectid;
2451         init_completion(&fs_info->qgroup_rescan_completion);
2452
2453         spin_unlock(&fs_info->qgroup_lock);
2454         mutex_unlock(&fs_info->qgroup_rescan_lock);
2455
2456         memset(&fs_info->qgroup_rescan_work, 0,
2457                sizeof(fs_info->qgroup_rescan_work));
2458         btrfs_init_work(&fs_info->qgroup_rescan_work,
2459                         btrfs_qgroup_rescan_helper,
2460                         btrfs_qgroup_rescan_worker, NULL, NULL);
2461
2462         if (ret) {
2463 err:
2464                 btrfs_info(fs_info, "qgroup_rescan_init failed with %d", ret);
2465                 return ret;
2466         }
2467
2468         return 0;
2469 }
2470
2471 static void
2472 qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
2473 {
2474         struct rb_node *n;
2475         struct btrfs_qgroup *qgroup;
2476
2477         spin_lock(&fs_info->qgroup_lock);
2478         /* clear all current qgroup tracking information */
2479         for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
2480                 qgroup = rb_entry(n, struct btrfs_qgroup, node);
2481                 qgroup->rfer = 0;
2482                 qgroup->rfer_cmpr = 0;
2483                 qgroup->excl = 0;
2484                 qgroup->excl_cmpr = 0;
2485         }
2486         spin_unlock(&fs_info->qgroup_lock);
2487 }
2488
2489 int
2490 btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
2491 {
2492         int ret = 0;
2493         struct btrfs_trans_handle *trans;
2494
2495         ret = qgroup_rescan_init(fs_info, 0, 1);
2496         if (ret)
2497                 return ret;
2498
2499         /*
2500          * We have set the rescan_progress to 0, which means no more
2501          * delayed refs will be accounted by btrfs_qgroup_account_ref.
2502          * However, a concurrent btrfs_qgroup_account_ref may already be
2503          * past its call to btrfs_find_all_roots, in which case it would
2504          * still do the accounting.
2505          * To solve this, we're committing the transaction, which will
2506          * ensure we run all delayed refs and only after that, we are
2507          * going to clear all tracking information for a clean start.
2508          */
2509
2510         trans = btrfs_join_transaction(fs_info->fs_root);
2511         if (IS_ERR(trans)) {
2512                 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2513                 return PTR_ERR(trans);
2514         }
2515         ret = btrfs_commit_transaction(trans, fs_info->fs_root);
2516         if (ret) {
2517                 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2518                 return ret;
2519         }
2520
2521         qgroup_rescan_zero_tracking(fs_info);
2522
2523         btrfs_queue_work(fs_info->qgroup_rescan_workers,
2524                          &fs_info->qgroup_rescan_work);
2525
2526         return 0;
2527 }
2528
2529 int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
2530                                      bool interruptible)
2531 {
2532         int running;
2533         int ret = 0;
2534
2535         mutex_lock(&fs_info->qgroup_rescan_lock);
2536         spin_lock(&fs_info->qgroup_lock);
2537         running = fs_info->qgroup_rescan_running;
2538         spin_unlock(&fs_info->qgroup_lock);
2539         mutex_unlock(&fs_info->qgroup_rescan_lock);
2540
2541         if (!running)
2542                 return 0;
2543
2544         if (interruptible)
2545                 ret = wait_for_completion_interruptible(
2546                                         &fs_info->qgroup_rescan_completion);
2547         else
2548                 wait_for_completion(&fs_info->qgroup_rescan_completion);
2549
2550         return ret;
2551 }
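
/*
 * Editor's note: a sketch of the start-then-wait pattern an ioctl handler
 * might use. The helper name and the choice of an interruptible wait are
 * assumptions; the real handlers live in ioctl.c.
 */
static int __maybe_unused example_rescan_and_wait(struct btrfs_fs_info *fs_info)
{
	int ret;

	ret = btrfs_qgroup_rescan(fs_info);	/* kicks the worker */
	if (ret)
		return ret;	/* e.g. -EINPROGRESS if one is already running */

	/* true: allow a signal to interrupt the wait (-ERESTARTSYS) */
	return btrfs_qgroup_wait_for_completion(fs_info, true);
}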
2552
2553 /*
2554  * This is only called from open_ctree where we're still single-threaded,
2555  * thus locking is omitted here.
2556  */
2557 void
2558 btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
2559 {
2560         if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
2561                 btrfs_queue_work(fs_info->qgroup_rescan_workers,
2562                                  &fs_info->qgroup_rescan_work);
2563 }
2564
2565 /*
2566  * Reserve qgroup space for range [start, start + len).
2567  *
2568  * This function will either reserve space from the related qgroups or do
2569  * nothing if the range is already reserved.
2570  *
2571  * Return 0 for a successful reservation
2572  * Return <0 for error (including -EDQUOT)
2573  *
2574  * NOTE: this function may sleep for memory allocation.
2575  */
2576 int btrfs_qgroup_reserve_data(struct inode *inode, u64 start, u64 len)
2577 {
2578         struct btrfs_root *root = BTRFS_I(inode)->root;
2579         struct extent_changeset changeset;
2580         struct ulist_node *unode;
2581         struct ulist_iterator uiter;
2582         int ret;
2583
2584         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
2585             !is_fstree(root->objectid) || len == 0)
2586                 return 0;
2587
2588         changeset.bytes_changed = 0;
2589         changeset.range_changed = ulist_alloc(GFP_NOFS);
             if (!changeset.range_changed)
                     return -ENOMEM;
2590         ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
2591                         start + len - 1, EXTENT_QGROUP_RESERVED, &changeset);
2592         trace_btrfs_qgroup_reserve_data(inode, start, len,
2593                                         changeset.bytes_changed,
2594                                         QGROUP_RESERVE);
2595         if (ret < 0)
2596                 goto cleanup;
2597         ret = qgroup_reserve(root, changeset.bytes_changed);
2598         if (ret < 0)
2599                 goto cleanup;
2600
2601         ulist_free(changeset.range_changed);
2602         return ret;
2603
2604 cleanup:
2605         /* cleanup already reserved ranges */
2606         ULIST_ITER_INIT(&uiter);
2607         while ((unode = ulist_next(changeset.range_changed, &uiter)))
2608                 clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
2609                                  unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL,
2610                                  GFP_NOFS);
2611         ulist_free(changeset.range_changed);
2612         return ret;
2613 }
2614
2615 static int __btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len,
2616                                        int free)
2617 {
2618         struct extent_changeset changeset;
2619         int trace_op = QGROUP_RELEASE;
2620         int ret;
2621
2622         changeset.bytes_changed = 0;
2623         changeset.range_changed = ulist_alloc(GFP_NOFS);
2624         if (!changeset.range_changed)
2625                 return -ENOMEM;
2626
2627         ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
2628                         start + len - 1, EXTENT_QGROUP_RESERVED, &changeset);
2629         if (ret < 0)
2630                 goto out;
2631
2632         if (free) {
2633                 qgroup_free(BTRFS_I(inode)->root, changeset.bytes_changed);
2634                 trace_op = QGROUP_FREE;
2635         }
2636         trace_btrfs_qgroup_release_data(inode, start, len,
2637                                         changeset.bytes_changed, trace_op);
2638 out:
2639         ulist_free(changeset.range_changed);
2640         return ret;
2641 }
2642
2643 /*
2644  * Free a reserved space range from io_tree and related qgroups
2645  *
2646  * Should be called when a range of pages gets invalidated before reaching
2647  * disk, or for the error cleanup case.
2648  *
2649  * For data written to disk, use btrfs_qgroup_release_data().
2650  *
2651  * NOTE: This function may sleep for memory allocation.
2652  */
2653 int btrfs_qgroup_free_data(struct inode *inode, u64 start, u64 len)
2654 {
2655         return __btrfs_qgroup_release_data(inode, start, len, 1);
2656 }
2657
2658 /*
2659  * Release a reserved space range from io_tree only.
2660  *
2661  * Should be called when a range of pages gets written to disk and the
2662  * corresponding FILE_EXTENT item is inserted into the corresponding root.
2663  *
2664  * Since the new qgroup accounting framework only updates qgroup numbers at
2665  * commit_transaction() time, the reserved space shouldn't be freed from
2666  * the related qgroups.
2667  *
2668  * But we should release the range from io_tree, to allow further writes
2669  * to be COWed.
2670  *
2671  * NOTE: This function may sleep for memory allocation.
2672  */
2673 int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len)
2674 {
2675         return __btrfs_qgroup_release_data(inode, start, len, 0);
2676 }
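
/*
 * Editor's note: the data reserve/release/free lifecycle, sketched for a
 * hypothetical buffered-write path. example_do_write() and the error
 * policy are assumptions; only the three btrfs_qgroup_*_data() calls come
 * from this file.
 */
static int example_do_write(struct inode *inode, u64 start, u64 len);

static int __maybe_unused example_data_write(struct inode *inode, u64 start,
					     u64 len)
{
	int ret;

	ret = btrfs_qgroup_reserve_data(inode, start, len);
	if (ret < 0)
		return ret;	/* e.g. -EDQUOT, nothing was reserved */

	ret = example_do_write(inode, start, len);	/* assumed helper */
	if (ret < 0) {
		/* error: return both the io_tree range and the qgroup space */
		btrfs_qgroup_free_data(inode, start, len);
		return ret;
	}
	/*
	 * Success: clear the io_tree range so it can be reserved again,
	 * but keep the qgroup numbers for commit-time accounting.
	 */
	return btrfs_qgroup_release_data(inode, start, len);
}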
2677
2678 int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes)
2679 {
2680         int ret;
2681
2682         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
2683             !is_fstree(root->objectid) || num_bytes == 0)
2684                 return 0;
2685
2686         BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
2687         ret = qgroup_reserve(root, num_bytes);
2688         if (ret < 0)
2689                 return ret;
2690         atomic_add(num_bytes, &root->qgroup_meta_rsv);
2691         return ret;
2692 }
2693
2694 void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
2695 {
2696         int reserved;
2697
2698         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
2699             !is_fstree(root->objectid))
2700                 return;
2701
2702         reserved = atomic_xchg(&root->qgroup_meta_rsv, 0);
2703         if (reserved == 0)
2704                 return;
2705         qgroup_free(root, reserved);
2706 }
2707
2708 void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
2709 {
2710         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
2711             !is_fstree(root->objectid))
2712                 return;
2713
2714         BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
2715         WARN_ON(atomic_read(&root->qgroup_meta_rsv) < num_bytes);
2716         atomic_sub(num_bytes, &root->qgroup_meta_rsv);
2717         qgroup_free(root, num_bytes);
2718 }
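
/*
 * Editor's note: a sketch of the metadata reservation pattern. num_bytes
 * must be nodesize-aligned (see the BUG_ON above); example_do_cow() and
 * the surrounding flow are assumptions.
 */
static int example_do_cow(struct btrfs_root *root);

static int __maybe_unused example_with_meta_rsv(struct btrfs_root *root)
{
	int num_bytes = 2 * root->nodesize;	/* aligned by construction */
	int ret;

	ret = btrfs_qgroup_reserve_meta(root, num_bytes);
	if (ret < 0)
		return ret;	/* -EDQUOT: a qgroup limit would be exceeded */

	ret = example_do_cow(root);
	if (ret < 0)
		btrfs_qgroup_free_meta(root, num_bytes);	/* hand it back */
	/*
	 * On success the reservation stays accounted in qgroup_meta_rsv and
	 * is returned later, e.g. via btrfs_qgroup_free_meta_all().
	 */
	return ret;
}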
2719
2720 /*
2721  * Check for leaked qgroup reserved space, normally at inode destroy
2722  * time.
2723  */
2724 void btrfs_qgroup_check_reserved_leak(struct inode *inode)
2725 {
2726         struct extent_changeset changeset;
2727         struct ulist_node *unode;
2728         struct ulist_iterator iter;
2729         int ret;
2730
2731         changeset.bytes_changed = 0;
2732         changeset.range_changed = ulist_alloc(GFP_NOFS);
2733         if (WARN_ON(!changeset.range_changed))
2734                 return;
2735
2736         ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
2737                         EXTENT_QGROUP_RESERVED, &changeset);
2738
2739         WARN_ON(ret < 0);
2740         if (WARN_ON(changeset.bytes_changed)) {
2741                 ULIST_ITER_INIT(&iter);
2742                 while ((unode = ulist_next(changeset.range_changed, &iter))) {
2743                         btrfs_warn(BTRFS_I(inode)->root->fs_info,
2744                                 "leaking qgroup reserved space, ino: %lu, start: %llu, end: %llu",
2745                                 inode->i_ino, unode->val, unode->aux);
2746                 }
2747                 qgroup_free(BTRFS_I(inode)->root, changeset.bytes_changed);
2748         }
2749         ulist_free(changeset.range_changed);
2750 }