fs/btrfs/delayed-ref.c
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
                          struct btrfs_delayed_tree_ref *ref1, int type)
{
        if (type == BTRFS_TREE_BLOCK_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
                          struct btrfs_delayed_data_ref *ref1)
{
        if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
                if (ref1->objectid < ref2->objectid)
                        return -1;
                if (ref1->objectid > ref2->objectid)
                        return 1;
                if (ref1->offset < ref2->offset)
                        return -1;
                if (ref1->offset > ref2->offset)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

/* insert a new ref into the head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
                                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_head *entry;
        struct btrfs_delayed_ref_head *ins;
        u64 bytenr;

        ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
        bytenr = ins->node.bytenr;
        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
                                 href_node);

                if (bytenr < entry->node.bytenr)
                        p = &(*p)->rb_left;
                else if (bytenr > entry->node.bytenr)
                        p = &(*p)->rb_right;
                else
                        return entry;
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color(node, root);
        return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
              int return_bigger)
{
        struct rb_node *n;
        struct btrfs_delayed_ref_head *entry;

        n = root->rb_node;
        entry = NULL;
        while (n) {
                entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

                if (bytenr < entry->node.bytenr)
                        n = n->rb_left;
                else if (bytenr > entry->node.bytenr)
                        n = n->rb_right;
                else
                        return entry;
        }
        if (entry && return_bigger) {
                if (bytenr > entry->node.bytenr) {
                        n = rb_next(&entry->href_node);
                        if (!n)
                                n = rb_first(root);
                        entry = rb_entry(n, struct btrfs_delayed_ref_head,
                                         href_node);
                        return entry;
                }
                return entry;
        }
        return NULL;
}

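/*
 * Acquire head->mutex while holding delayed_refs->lock.  If the mutex is
 * contended, drop the spinlock, sleep on the mutex and retake the spinlock.
 * Returns 0 with the mutex held, or -EAGAIN if the head was run and removed
 * from the tree while we slept, in which case the caller must retry.
 */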
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_head *head)
{
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        assert_spin_locked(&delayed_refs->lock);
        if (mutex_trylock(&head->mutex))
                return 0;

        atomic_inc(&head->node.refs);
        spin_unlock(&delayed_refs->lock);

        mutex_lock(&head->mutex);
        spin_lock(&delayed_refs->lock);
        if (!head->node.in_tree) {
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref(&head->node);
                return -EAGAIN;
        }
        btrfs_put_delayed_ref(&head->node);
        return 0;
}

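/*
 * Remove a delayed ref from its containing structure: head refs are erased
 * from the href rbtree, ordinary refs are deleted from their head's ref_list
 * (the head's lock must be held for the latter).  Drops our reference on the
 * node and updates the entry counters.
 */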
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
                                    struct btrfs_delayed_ref_root *delayed_refs,
                                    struct btrfs_delayed_ref_head *head,
                                    struct btrfs_delayed_ref_node *ref)
{
        if (btrfs_delayed_ref_is_head(ref)) {
                head = btrfs_delayed_node_to_head(ref);
                rb_erase(&head->href_node, &delayed_refs->href_root);
        } else {
                assert_spin_locked(&head->lock);
                list_del(&ref->list);
        }
        ref->in_tree = 0;
        btrfs_put_delayed_ref(ref);
        atomic_dec(&delayed_refs->num_entries);
        if (trans->delayed_ref_updates)
                trans->delayed_ref_updates--;
}

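/*
 * Try to merge @ref with the other refs on @head's list.  Refs that differ
 * in type or target, or whose seq is still visible to a tree mod log user
 * (seq >= @seq), are skipped.  Matching refs are folded into @ref by
 * combining their ref_mods; refs whose mod reaches zero are dropped.
 * Returns true once @ref itself has been consumed.
 */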
static bool merge_ref(struct btrfs_trans_handle *trans,
                      struct btrfs_delayed_ref_root *delayed_refs,
                      struct btrfs_delayed_ref_head *head,
                      struct btrfs_delayed_ref_node *ref,
                      u64 seq)
{
        struct btrfs_delayed_ref_node *next;
        bool done = false;

        next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
                                list);
        while (!done && &next->list != &head->ref_list) {
                int mod;
                struct btrfs_delayed_ref_node *next2;

                next2 = list_next_entry(next, list);

                if (next == ref)
                        goto next;

                if (seq && next->seq >= seq)
                        goto next;

                if (next->type != ref->type)
                        goto next;

                if ((ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
                     ref->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
                    comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref),
                                   btrfs_delayed_node_to_tree_ref(next),
                                   ref->type))
                        goto next;
                if ((ref->type == BTRFS_EXTENT_DATA_REF_KEY ||
                     ref->type == BTRFS_SHARED_DATA_REF_KEY) &&
                    comp_data_refs(btrfs_delayed_node_to_data_ref(ref),
                                   btrfs_delayed_node_to_data_ref(next)))
                        goto next;

                if (ref->action == next->action) {
                        mod = next->ref_mod;
                } else {
                        if (ref->ref_mod < next->ref_mod) {
                                swap(ref, next);
                                done = true;
                        }
                        mod = -next->ref_mod;
                }

                drop_delayed_ref(trans, delayed_refs, head, next);
                ref->ref_mod += mod;
                if (ref->ref_mod == 0) {
                        drop_delayed_ref(trans, delayed_refs, head, ref);
                        done = true;
                } else {
                        /*
                         * Can't have multiples of the same ref on a tree block.
                         */
                        WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
                                ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
                }
next:
                next = next2;
        }

        return done;
}

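/*
 * Merge as many delayed refs on this head as possible.  Data extents rarely
 * accumulate enough refs to make this worthwhile, so they are skipped.  The
 * lowest seq on the tree mod log list (if any) acts as a barrier: refs at or
 * past it must stay visible and are not merged.
 */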
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
                              struct btrfs_fs_info *fs_info,
                              struct btrfs_delayed_ref_root *delayed_refs,
                              struct btrfs_delayed_ref_head *head)
{
        struct btrfs_delayed_ref_node *ref;
        u64 seq = 0;

        assert_spin_locked(&head->lock);

        if (list_empty(&head->ref_list))
                return;

        /* We don't have too many refs to merge for data. */
        if (head->is_data)
                return;

        spin_lock(&fs_info->tree_mod_seq_lock);
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                struct seq_list *elem;

                elem = list_first_entry(&fs_info->tree_mod_seq_list,
                                        struct seq_list, list);
                seq = elem->seq;
        }
        spin_unlock(&fs_info->tree_mod_seq_lock);

        ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
                               list);
        while (&ref->list != &head->ref_list) {
                if (seq && ref->seq >= seq)
                        goto next;

                if (merge_ref(trans, delayed_refs, head, ref, seq)) {
                        if (list_empty(&head->ref_list))
                                break;
                        ref = list_first_entry(&head->ref_list,
                                               struct btrfs_delayed_ref_node,
                                               list);
                        continue;
                }
next:
                ref = list_next_entry(ref, list);
        }
}

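/*
 * Returns nonzero if @seq is at or past the lowest seq on the tree mod log
 * list, i.e. the delayed ref must be held back until that log user is done.
 */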
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
                            struct btrfs_delayed_ref_root *delayed_refs,
                            u64 seq)
{
        struct seq_list *elem;
        int ret = 0;

        spin_lock(&fs_info->tree_mod_seq_lock);
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                elem = list_first_entry(&fs_info->tree_mod_seq_list,
                                        struct seq_list, list);
                if (seq >= elem->seq) {
                        pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
                                 (u32)(seq >> 32), (u32)seq,
                                 (u32)(elem->seq >> 32), (u32)elem->seq,
                                 delayed_refs);
                        ret = 1;
                }
        }

        spin_unlock(&fs_info->tree_mod_seq_lock);
        return ret;
}

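/*
 * Pick the next delayed ref head to run, starting at run_delayed_start and
 * wrapping around once.  Heads already being processed are skipped.  The
 * returned head is marked as processing and run_delayed_start is advanced
 * past it.
 */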
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_delayed_ref_head *head;
        u64 start;
        bool loop = false;

        delayed_refs = &trans->transaction->delayed_refs;

again:
        start = delayed_refs->run_delayed_start;
        head = find_ref_head(&delayed_refs->href_root, start, 1);
        if (!head && !loop) {
                delayed_refs->run_delayed_start = 0;
                start = 0;
                loop = true;
                head = find_ref_head(&delayed_refs->href_root, start, 1);
                if (!head)
                        return NULL;
        } else if (!head && loop) {
                return NULL;
        }

        while (head->processing) {
                struct rb_node *node;

                node = rb_next(&head->href_node);
                if (!node) {
                        if (loop)
                                return NULL;
                        delayed_refs->run_delayed_start = 0;
                        start = 0;
                        loop = true;
                        goto again;
                }
                head = rb_entry(node, struct btrfs_delayed_ref_head,
                                href_node);
        }

        head->processing = 1;
        WARN_ON(delayed_refs->num_heads_ready == 0);
        delayed_refs->num_heads_ready--;
        delayed_refs->run_delayed_start = head->node.bytenr +
                head->node.num_bytes;
        return head;
}

/*
 * Helper to insert the ref_node at the tail or merge it with the list tail.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int
add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_root *root,
                           struct btrfs_delayed_ref_head *href,
                           struct btrfs_delayed_ref_node *ref)
{
        struct btrfs_delayed_ref_node *exist;
        int mod;
        int ret = 0;

        spin_lock(&href->lock);
        /* Check whether we can merge the tail node with ref */
        if (list_empty(&href->ref_list))
                goto add_tail;
        exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
                           list);
        /* No need to compare bytenr nor is_head */
        if (exist->type != ref->type || exist->seq != ref->seq)
                goto add_tail;

        if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
             exist->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
            comp_tree_refs(btrfs_delayed_node_to_tree_ref(exist),
                           btrfs_delayed_node_to_tree_ref(ref),
                           ref->type))
                goto add_tail;
        if ((exist->type == BTRFS_EXTENT_DATA_REF_KEY ||
             exist->type == BTRFS_SHARED_DATA_REF_KEY) &&
            comp_data_refs(btrfs_delayed_node_to_data_ref(exist),
                           btrfs_delayed_node_to_data_ref(ref)))
                goto add_tail;

        /* Now we are sure we can merge */
        ret = 1;
        if (exist->action == ref->action) {
                mod = ref->ref_mod;
        } else {
                /* Need to change action */
                if (exist->ref_mod < ref->ref_mod) {
                        exist->action = ref->action;
                        mod = -exist->ref_mod;
                        exist->ref_mod = ref->ref_mod;
                } else
                        mod = -ref->ref_mod;
        }
        exist->ref_mod += mod;

        /* remove existing tail if its ref_mod is zero */
        if (exist->ref_mod == 0)
                drop_delayed_ref(trans, root, href, exist);
        spin_unlock(&href->lock);
        return ret;

add_tail:
        list_add_tail(&ref->list, &href->ref_list);
        atomic_inc(&root->num_entries);
        trans->delayed_ref_updates++;
        spin_unlock(&href->lock);
        return ret;
}

/*
 * helper function to update the accounting in the head ref;
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
                         struct btrfs_delayed_ref_node *existing,
                         struct btrfs_delayed_ref_node *update)
{
        struct btrfs_delayed_ref_head *existing_ref;
        struct btrfs_delayed_ref_head *ref;
        int old_ref_mod;

        existing_ref = btrfs_delayed_node_to_head(existing);
        ref = btrfs_delayed_node_to_head(update);
        BUG_ON(existing_ref->is_data != ref->is_data);

        spin_lock(&existing_ref->lock);
        if (ref->must_insert_reserved) {
                /* if the extent was freed and then
                 * reallocated before the delayed ref
                 * entries were processed, we can end up
                 * with an existing head ref without
                 * the must_insert_reserved flag set.
                 * Set it again here
                 */
                existing_ref->must_insert_reserved = ref->must_insert_reserved;

                /*
                 * update the num_bytes so we make sure the accounting
                 * is done correctly
                 */
                existing->num_bytes = update->num_bytes;

        }

        if (ref->extent_op) {
                if (!existing_ref->extent_op) {
                        existing_ref->extent_op = ref->extent_op;
                } else {
                        if (ref->extent_op->update_key) {
                                memcpy(&existing_ref->extent_op->key,
                                       &ref->extent_op->key,
                                       sizeof(ref->extent_op->key));
                                existing_ref->extent_op->update_key = true;
                        }
                        if (ref->extent_op->update_flags) {
                                existing_ref->extent_op->flags_to_set |=
                                        ref->extent_op->flags_to_set;
                                existing_ref->extent_op->update_flags = true;
                        }
                        btrfs_free_delayed_extent_op(ref->extent_op);
                }
        }
        /*
         * update the reference mod on the head to reflect this new operation;
         * we only need the lock here because the head could be getting
         * processed currently, for refs we just added we know we're a-ok.
         */
        old_ref_mod = existing_ref->total_ref_mod;
        existing->ref_mod += update->ref_mod;
        existing_ref->total_ref_mod += update->ref_mod;

        /*
         * If we are going from a positive ref mod to a negative or vice
         * versa we need to make sure to adjust pending_csums accordingly.
         */
        if (existing_ref->is_data) {
                if (existing_ref->total_ref_mod >= 0 && old_ref_mod < 0)
                        delayed_refs->pending_csums -= existing->num_bytes;
                if (existing_ref->total_ref_mod < 0 && old_ref_mod >= 0)
                        delayed_refs->pending_csums += existing->num_bytes;
        }
        spin_unlock(&existing_ref->lock);
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_node *ref,
                     struct btrfs_qgroup_extent_record *qrecord,
                     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
                     int action, int is_data)
{
        struct btrfs_delayed_ref_head *existing;
        struct btrfs_delayed_ref_head *head_ref = NULL;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *qexisting;
        int count_mod = 1;
        int must_insert_reserved = 0;

        /* If reserved is provided, it must be a data extent. */
        BUG_ON(!is_data && reserved);

        /*
         * the head node stores the sum of all the mods, so dropping a ref
         * should drop the sum in the head node by one.
         */
        if (action == BTRFS_UPDATE_DELAYED_HEAD)
                count_mod = 0;
        else if (action == BTRFS_DROP_DELAYED_REF)
                count_mod = -1;

        /*
         * BTRFS_ADD_DELAYED_EXTENT means that we need to update
         * the reserved accounting when the extent is finally added, or
         * if a later modification deletes the delayed ref without ever
         * inserting the extent into the extent allocation tree.
         * ref->must_insert_reserved is the flag used to record
         * that accounting mods are required.
         *
         * Once we record must_insert_reserved, switch the action to
         * BTRFS_ADD_DELAYED_REF because other special casing is not required.
         */
        if (action == BTRFS_ADD_DELAYED_EXTENT)
                must_insert_reserved = 1;
        else
                must_insert_reserved = 0;

        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = count_mod;
        ref->type = 0;
        ref->action = 0;
        ref->is_head = 1;
        ref->in_tree = 1;
        ref->seq = 0;

        head_ref = btrfs_delayed_node_to_head(ref);
        head_ref->must_insert_reserved = must_insert_reserved;
        head_ref->is_data = is_data;
        INIT_LIST_HEAD(&head_ref->ref_list);
        head_ref->processing = 0;
        head_ref->total_ref_mod = count_mod;
        head_ref->qgroup_reserved = 0;
        head_ref->qgroup_ref_root = 0;

        /* Record qgroup extent info if provided */
        if (qrecord) {
                if (ref_root && reserved) {
                        head_ref->qgroup_ref_root = ref_root;
                        head_ref->qgroup_reserved = reserved;
                }

                qrecord->bytenr = bytenr;
                qrecord->num_bytes = num_bytes;
                qrecord->old_roots = NULL;

                qexisting = btrfs_qgroup_insert_dirty_extent(fs_info,
                                                             delayed_refs,
                                                             qrecord);
                if (qexisting)
                        kfree(qrecord);
        }

        spin_lock_init(&head_ref->lock);
        mutex_init(&head_ref->mutex);

        trace_add_delayed_ref_head(fs_info, ref, head_ref, action);

        existing = htree_insert(&delayed_refs->href_root,
                                &head_ref->href_node);
        if (existing) {
                WARN_ON(ref_root && reserved && existing->qgroup_ref_root
                        && existing->qgroup_reserved);
                update_existing_head_ref(delayed_refs, &existing->node, ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
                head_ref = existing;
        } else {
                if (is_data && count_mod < 0)
                        delayed_refs->pending_csums += num_bytes;
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
                atomic_inc(&delayed_refs->num_entries);
                trans->delayed_ref_updates++;
        }
        return head_ref;
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_head *head_ref,
                     struct btrfs_delayed_ref_node *ref, u64 bytenr,
                     u64 num_bytes, u64 parent, u64 ref_root, int level,
                     int action)
{
        struct btrfs_delayed_tree_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        u64 seq = 0;
        int ret;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        if (is_fstree(ref_root))
                seq = atomic64_read(&fs_info->tree_mod_seq);
        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;
        ref->seq = seq;

        full_ref = btrfs_delayed_node_to_tree_ref(ref);
        full_ref->parent = parent;
        full_ref->root = ref_root;
        if (parent)
                ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
        else
                ref->type = BTRFS_TREE_BLOCK_REF_KEY;
        full_ref->level = level;

        trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);

        ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

        /*
         * XXX: memory should be freed at the same level it was allocated.
         * But bad practice is everywhere, so follow it for now.  Needs cleanup.
         */
        if (ret > 0)
                kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_head *head_ref,
                     struct btrfs_delayed_ref_node *ref, u64 bytenr,
                     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
                     u64 offset, int action)
{
        struct btrfs_delayed_data_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        u64 seq = 0;
        int ret;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        delayed_refs = &trans->transaction->delayed_refs;

        if (is_fstree(ref_root))
                seq = atomic64_read(&fs_info->tree_mod_seq);

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;
        ref->seq = seq;

        full_ref = btrfs_delayed_node_to_data_ref(ref);
        full_ref->parent = parent;
        full_ref->root = ref_root;
        if (parent)
                ref->type = BTRFS_SHARED_DATA_REF_KEY;
        else
                ref->type = BTRFS_EXTENT_DATA_REF_KEY;

        full_ref->objectid = owner;
        full_ref->offset = offset;

        trace_add_delayed_data_ref(fs_info, ref, full_ref, action);

        ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

        if (ret > 0)
                kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 ref_root, int level, int action,
                               struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_tree_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *record = NULL;

        BUG_ON(extent_op && extent_op->is_data);
        ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref)
                goto free_ref;

        if (fs_info->quota_enabled && is_fstree(ref_root)) {
                record = kmalloc(sizeof(*record), GFP_NOFS);
                if (!record)
                        goto free_head_ref;
        }

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
                                        bytenr, num_bytes, 0, 0, action, 0);

        add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
                             num_bytes, parent, ref_root, level, action);
        spin_unlock(&delayed_refs->lock);

        return 0;

free_head_ref:
        kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
free_ref:
        kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

        return -ENOMEM;
}

/*
 * add a delayed data ref.  it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes,
                               u64 parent, u64 ref_root,
                               u64 owner, u64 offset, u64 reserved, int action,
                               struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_data_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *record = NULL;

        BUG_ON(extent_op && !extent_op->is_data);
        ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
                kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                return -ENOMEM;
        }

        if (fs_info->quota_enabled && is_fstree(ref_root)) {
                record = kmalloc(sizeof(*record), GFP_NOFS);
                if (!record) {
                        kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                        kmem_cache_free(btrfs_delayed_ref_head_cachep,
                                        head_ref);
                        return -ENOMEM;
                }
        }

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
                                        bytenr, num_bytes, ref_root, reserved,
                                        action, 1);

        add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
                             num_bytes, parent, ref_root, owner, offset,
                             action);
        spin_unlock(&delayed_refs->lock);

        return 0;
}

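/*
 * Attach qgroup reservation info to the delayed ref head for @bytenr so the
 * reserved space is accounted when the head is run.  Returns -ENOENT if no
 * head exists for that extent.
 */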
int btrfs_add_delayed_qgroup_reserve(struct btrfs_fs_info *fs_info,
                                     struct btrfs_trans_handle *trans,
                                     u64 ref_root, u64 bytenr, u64 num_bytes)
{
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_delayed_ref_head *ref_head;
        int ret = 0;

        if (!fs_info->quota_enabled || !is_fstree(ref_root))
                return 0;

        delayed_refs = &trans->transaction->delayed_refs;

        spin_lock(&delayed_refs->lock);
        ref_head = find_ref_head(&delayed_refs->href_root, bytenr, 0);
        if (!ref_head) {
                ret = -ENOENT;
                goto out;
        }
        WARN_ON(ref_head->qgroup_reserved || ref_head->qgroup_ref_root);
        ref_head->qgroup_ref_root = ref_root;
        ref_head->qgroup_reserved = num_bytes;
out:
        spin_unlock(&delayed_refs->lock);
        return ret;
}

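/*
 * Queue a flags/key update for an extent by inserting a head-only delayed
 * ref with the BTRFS_UPDATE_DELAYED_HEAD action; no backref is added or
 * dropped, only the extent op is recorded.
 */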
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
                                struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref)
                return -ENOMEM;

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
                             num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
                             extent_op->is_data);

        spin_unlock(&delayed_refs->lock);
        return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}

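/* destroy the slab caches used by the delayed ref machinery */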
void btrfs_delayed_ref_exit(void)
{
        kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
        kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
        kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
        kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

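/* create the slab caches used by the delayed ref machinery */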
int btrfs_delayed_ref_init(void)
{
        btrfs_delayed_ref_head_cachep = kmem_cache_create(
                                "btrfs_delayed_ref_head",
                                sizeof(struct btrfs_delayed_ref_head), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_ref_head_cachep)
                goto fail;

        btrfs_delayed_tree_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_tree_ref",
                                sizeof(struct btrfs_delayed_tree_ref), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_tree_ref_cachep)
                goto fail;

        btrfs_delayed_data_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_data_ref",
                                sizeof(struct btrfs_delayed_data_ref), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_data_ref_cachep)
                goto fail;

        btrfs_delayed_extent_op_cachep = kmem_cache_create(
                                "btrfs_delayed_extent_op",
                                sizeof(struct btrfs_delayed_extent_op), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_extent_op_cachep)
                goto fail;

        return 0;
fail:
        btrfs_delayed_ref_exit();
        return -ENOMEM;
}