/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

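/*
 * Slab caches for the delayed ref structures below; they are created in
 * btrfs_delayed_ref_init() and destroyed in btrfs_delayed_ref_exit().
 */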
struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

/*
 * compare two delayed tree backrefs with the same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
                          struct btrfs_delayed_tree_ref *ref1, int type)
{
        if (type == BTRFS_TREE_BLOCK_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

/*
 * compare two delayed data backrefs with the same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
                          struct btrfs_delayed_data_ref *ref1)
{
        if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
                if (ref1->objectid < ref2->objectid)
                        return -1;
                if (ref1->objectid > ref2->objectid)
                        return 1;
                if (ref1->offset < ref2->offset)
                        return -1;
                if (ref1->offset > ref2->offset)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

/*
 * insert a new delayed ref head into the rbtree of head refs.  Returns the
 * existing head if one with the same bytenr is already present, or NULL if
 * the new node was inserted.
 */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
                                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_head *entry;
        struct btrfs_delayed_ref_head *ins;
        u64 bytenr;

        ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
        bytenr = ins->node.bytenr;
        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
                                 href_node);

                if (bytenr < entry->node.bytenr)
                        p = &(*p)->rb_left;
                else if (bytenr > entry->node.bytenr)
                        p = &(*p)->rb_right;
                else
                        return entry;
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color(node, root);
        return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found (wrapping back to the first entry in the tree if needed).
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
              int return_bigger)
{
        struct rb_node *n;
        struct btrfs_delayed_ref_head *entry;

        n = root->rb_node;
        entry = NULL;
        while (n) {
                entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

                if (bytenr < entry->node.bytenr)
                        n = n->rb_left;
                else if (bytenr > entry->node.bytenr)
                        n = n->rb_right;
                else
                        return entry;
        }
        if (entry && return_bigger) {
                if (bytenr > entry->node.bytenr) {
                        n = rb_next(&entry->href_node);
                        if (!n)
                                n = rb_first(root);
                        entry = rb_entry(n, struct btrfs_delayed_ref_head,
                                         href_node);
                        return entry;
                }
                return entry;
        }
        return NULL;
}

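/*
 * Try to take the mutex on a delayed ref head.  Called with
 * delayed_refs->lock held; that lock may be dropped and retaken while
 * blocking on the mutex.  Returns 0 on success (with the mutex held),
 * or -EAGAIN if the head was removed from the tree in the meantime and
 * the caller must retry.
 */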
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_head *head)
{
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        assert_spin_locked(&delayed_refs->lock);
        if (mutex_trylock(&head->mutex))
                return 0;

        atomic_inc(&head->node.refs);
        spin_unlock(&delayed_refs->lock);

        mutex_lock(&head->mutex);
        spin_lock(&delayed_refs->lock);
        if (!head->node.in_tree) {
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref(&head->node);
                return -EAGAIN;
        }
        btrfs_put_delayed_ref(&head->node);
        return 0;
}

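/*
 * Remove a delayed ref from its head's ref list (or, for a head ref,
 * from the href rbtree), drop a reference on it and update the
 * bookkeeping counters.
 */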
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
                                    struct btrfs_delayed_ref_root *delayed_refs,
                                    struct btrfs_delayed_ref_head *head,
                                    struct btrfs_delayed_ref_node *ref)
{
        if (btrfs_delayed_ref_is_head(ref)) {
                head = btrfs_delayed_node_to_head(ref);
                rb_erase(&head->href_node, &delayed_refs->href_root);
        } else {
                assert_spin_locked(&head->lock);
                list_del(&ref->list);
        }
        ref->in_tree = 0;
        btrfs_put_delayed_ref(ref);
        atomic_dec(&delayed_refs->num_entries);
        if (trans->delayed_ref_updates)
                trans->delayed_ref_updates--;
}

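/*
 * Walk the other refs on @head and try to merge @ref with any ref that
 * describes the same backref.  Matching refs with the same action have
 * their ref_mod counts combined; opposing actions cancel out, and a ref
 * whose ref_mod reaches zero is dropped entirely.  Refs at or past @seq
 * must stay visible to tree mod log users and are skipped.  Returns
 * true when @ref itself was dropped or swapped away, in which case the
 * caller must restart from the head of the list.
 */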
static bool merge_ref(struct btrfs_trans_handle *trans,
                      struct btrfs_delayed_ref_root *delayed_refs,
                      struct btrfs_delayed_ref_head *head,
                      struct btrfs_delayed_ref_node *ref,
                      u64 seq)
{
        struct btrfs_delayed_ref_node *next;
        bool done = false;

        next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
                                list);
        while (!done && &next->list != &head->ref_list) {
                int mod;
                struct btrfs_delayed_ref_node *next2;

                next2 = list_next_entry(next, list);

                if (next == ref)
                        goto next;

                if (seq && next->seq >= seq)
                        goto next;

                if (next->type != ref->type)
                        goto next;

                if ((ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
                     ref->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
                    comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref),
                                   btrfs_delayed_node_to_tree_ref(next),
                                   ref->type))
                        goto next;
                if ((ref->type == BTRFS_EXTENT_DATA_REF_KEY ||
                     ref->type == BTRFS_SHARED_DATA_REF_KEY) &&
                    comp_data_refs(btrfs_delayed_node_to_data_ref(ref),
                                   btrfs_delayed_node_to_data_ref(next)))
                        goto next;

                if (ref->action == next->action) {
                        mod = next->ref_mod;
                } else {
                        if (ref->ref_mod < next->ref_mod) {
                                swap(ref, next);
                                done = true;
                        }
                        mod = -next->ref_mod;
                }

                drop_delayed_ref(trans, delayed_refs, head, next);
                ref->ref_mod += mod;
                if (ref->ref_mod == 0) {
                        drop_delayed_ref(trans, delayed_refs, head, ref);
                        done = true;
                } else {
                        /*
                         * Can't have multiples of the same ref on a tree block.
                         */
                        WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
                                ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
                }
next:
                next = next2;
        }

        return done;
}

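/*
 * Merge all mergeable refs queued on @head.  Refs newer than the lowest
 * sequence number still tracked in the tree mod log are left alone, and
 * data refs are skipped entirely since they rarely accumulate enough
 * entries to be worth merging.
 */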
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
                              struct btrfs_fs_info *fs_info,
                              struct btrfs_delayed_ref_root *delayed_refs,
                              struct btrfs_delayed_ref_head *head)
{
        struct btrfs_delayed_ref_node *ref;
        u64 seq = 0;

        assert_spin_locked(&head->lock);

        if (list_empty(&head->ref_list))
                return;

        /* We don't have too many refs to merge for data. */
        if (head->is_data)
                return;

        spin_lock(&fs_info->tree_mod_seq_lock);
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                struct seq_list *elem;

                elem = list_first_entry(&fs_info->tree_mod_seq_list,
                                        struct seq_list, list);
                seq = elem->seq;
        }
        spin_unlock(&fs_info->tree_mod_seq_lock);

        ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
                               list);
        while (&ref->list != &head->ref_list) {
                if (seq && ref->seq >= seq)
                        goto next;

                if (merge_ref(trans, delayed_refs, head, ref, seq)) {
                        if (list_empty(&head->ref_list))
                                break;
                        ref = list_first_entry(&head->ref_list,
                                               struct btrfs_delayed_ref_node,
                                               list);
                        continue;
                }
next:
                ref = list_next_entry(ref, list);
        }
}

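/*
 * Return 1 if the delayed ref with sequence number @seq must be held
 * back because a tree mod log user still tracks an older sequence
 * number, 0 otherwise.
 */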
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
                            struct btrfs_delayed_ref_root *delayed_refs,
                            u64 seq)
{
        struct seq_list *elem;
        int ret = 0;

        spin_lock(&fs_info->tree_mod_seq_lock);
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                elem = list_first_entry(&fs_info->tree_mod_seq_list,
                                        struct seq_list, list);
                if (seq >= elem->seq) {
                        btrfs_debug(fs_info,
                                "holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)",
                                (u32)(seq >> 32), (u32)seq,
                                (u32)(elem->seq >> 32), (u32)elem->seq,
                                delayed_refs);
                        ret = 1;
                }
        }

        spin_unlock(&fs_info->tree_mod_seq_lock);
        return ret;
}

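/*
 * Pick the next delayed ref head for processing, starting at
 * run_delayed_start and wrapping around to the beginning of the href
 * rbtree at most once.  Heads that are already being processed are
 * skipped.  The selected head is marked as processing and returned, or
 * NULL if no unprocessed head remains.
 */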
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_delayed_ref_head *head;
        u64 start;
        bool loop = false;

        delayed_refs = &trans->transaction->delayed_refs;

again:
        start = delayed_refs->run_delayed_start;
        head = find_ref_head(&delayed_refs->href_root, start, 1);
        if (!head && !loop) {
                delayed_refs->run_delayed_start = 0;
                start = 0;
                loop = true;
                head = find_ref_head(&delayed_refs->href_root, start, 1);
                if (!head)
                        return NULL;
        } else if (!head && loop) {
                return NULL;
        }

        while (head->processing) {
                struct rb_node *node;

                node = rb_next(&head->href_node);
                if (!node) {
                        if (loop)
                                return NULL;
                        delayed_refs->run_delayed_start = 0;
                        start = 0;
                        loop = true;
                        goto again;
                }
                head = rb_entry(node, struct btrfs_delayed_ref_head,
                                href_node);
        }

        head->processing = 1;
        WARN_ON(delayed_refs->num_heads_ready == 0);
        delayed_refs->num_heads_ready--;
        delayed_refs->run_delayed_start = head->node.bytenr +
                head->node.num_bytes;
        return head;
}

/*
 * Helper to insert the ref node at the tail of the list, or merge it with
 * the current tail.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int
add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_root *root,
                           struct btrfs_delayed_ref_head *href,
                           struct btrfs_delayed_ref_node *ref)
{
        struct btrfs_delayed_ref_node *exist;
        int mod;
        int ret = 0;

        spin_lock(&href->lock);
        /* Check whether we can merge the tail node with ref */
        if (list_empty(&href->ref_list))
                goto add_tail;
        exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
                           list);
        /* No need to compare bytenr nor is_head */
        if (exist->type != ref->type || exist->seq != ref->seq)
                goto add_tail;

        if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
             exist->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
            comp_tree_refs(btrfs_delayed_node_to_tree_ref(exist),
                           btrfs_delayed_node_to_tree_ref(ref),
                           ref->type))
                goto add_tail;
        if ((exist->type == BTRFS_EXTENT_DATA_REF_KEY ||
             exist->type == BTRFS_SHARED_DATA_REF_KEY) &&
            comp_data_refs(btrfs_delayed_node_to_data_ref(exist),
                           btrfs_delayed_node_to_data_ref(ref)))
                goto add_tail;

        /* Now we are sure we can merge */
        ret = 1;
        if (exist->action == ref->action) {
                mod = ref->ref_mod;
        } else {
                /* Need to change action */
                if (exist->ref_mod < ref->ref_mod) {
                        exist->action = ref->action;
                        mod = -exist->ref_mod;
                        exist->ref_mod = ref->ref_mod;
                } else
                        mod = -ref->ref_mod;
        }
        exist->ref_mod += mod;

        /* remove the existing tail if its ref_mod is zero */
        if (exist->ref_mod == 0)
                drop_delayed_ref(trans, root, href, exist);
        spin_unlock(&href->lock);
        return ret;

add_tail:
        list_add_tail(&ref->list, &href->ref_list);
        atomic_inc(&root->num_entries);
        trans->delayed_ref_updates++;
        spin_unlock(&href->lock);
        return ret;
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
                         struct btrfs_delayed_ref_node *existing,
                         struct btrfs_delayed_ref_node *update)
{
        struct btrfs_delayed_ref_head *existing_ref;
        struct btrfs_delayed_ref_head *ref;
        int old_ref_mod;

        existing_ref = btrfs_delayed_node_to_head(existing);
        ref = btrfs_delayed_node_to_head(update);
        BUG_ON(existing_ref->is_data != ref->is_data);

        spin_lock(&existing_ref->lock);
        if (ref->must_insert_reserved) {
                /*
                 * if the extent was freed and then reallocated before the
                 * delayed ref entries were processed, we can end up with an
                 * existing head ref without the must_insert_reserved flag
                 * set.  Set it again here
                 */
                existing_ref->must_insert_reserved = ref->must_insert_reserved;

                /*
                 * update the num_bytes so we make sure the accounting
                 * is done correctly
                 */
                existing->num_bytes = update->num_bytes;

        }

        if (ref->extent_op) {
                if (!existing_ref->extent_op) {
                        existing_ref->extent_op = ref->extent_op;
                } else {
                        if (ref->extent_op->update_key) {
                                memcpy(&existing_ref->extent_op->key,
                                       &ref->extent_op->key,
                                       sizeof(ref->extent_op->key));
                                existing_ref->extent_op->update_key = true;
                        }
                        if (ref->extent_op->update_flags) {
                                existing_ref->extent_op->flags_to_set |=
                                        ref->extent_op->flags_to_set;
                                existing_ref->extent_op->update_flags = true;
                        }
                        btrfs_free_delayed_extent_op(ref->extent_op);
                }
        }
        /*
         * update the reference mod on the head to reflect this new operation,
         * we only need the lock for this case because the head could be
         * processed concurrently; for refs we just added we know we're a-ok.
         */
        old_ref_mod = existing_ref->total_ref_mod;
        existing->ref_mod += update->ref_mod;
        existing_ref->total_ref_mod += update->ref_mod;

        /*
         * If we are going from a positive ref mod to a negative one, or vice
         * versa, we need to make sure to adjust pending_csums accordingly.
         */
        if (existing_ref->is_data) {
                if (existing_ref->total_ref_mod >= 0 && old_ref_mod < 0)
                        delayed_refs->pending_csums -= existing->num_bytes;
                if (existing_ref->total_ref_mod < 0 && old_ref_mod >= 0)
                        delayed_refs->pending_csums += existing->num_bytes;
        }
        spin_unlock(&existing_ref->lock);
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_node *ref,
                     struct btrfs_qgroup_extent_record *qrecord,
                     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
                     int action, int is_data)
{
        struct btrfs_delayed_ref_head *existing;
        struct btrfs_delayed_ref_head *head_ref = NULL;
        struct btrfs_delayed_ref_root *delayed_refs;
        int count_mod = 1;
        int must_insert_reserved = 0;

        /* If reserved is provided, it must be a data extent. */
        BUG_ON(!is_data && reserved);

        /*
         * the head node stores the sum of all the mods, so dropping a ref
         * should drop the sum in the head node by one.
         */
        if (action == BTRFS_UPDATE_DELAYED_HEAD)
                count_mod = 0;
        else if (action == BTRFS_DROP_DELAYED_REF)
                count_mod = -1;

        /*
         * BTRFS_ADD_DELAYED_EXTENT means that we need to update
         * the reserved accounting when the extent is finally added, or
         * if a later modification deletes the delayed ref without ever
         * inserting the extent into the extent allocation tree.
         * ref->must_insert_reserved is the flag used to record
         * that accounting mods are required.
         *
         * Once we record must_insert_reserved, switch the action to
         * BTRFS_ADD_DELAYED_REF because other special casing is not required.
         */
        if (action == BTRFS_ADD_DELAYED_EXTENT)
                must_insert_reserved = 1;
        else
                must_insert_reserved = 0;

        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = count_mod;
        ref->type = 0;
        ref->action = 0;
        ref->is_head = 1;
        ref->in_tree = 1;
        ref->seq = 0;

        head_ref = btrfs_delayed_node_to_head(ref);
        head_ref->must_insert_reserved = must_insert_reserved;
        head_ref->is_data = is_data;
        INIT_LIST_HEAD(&head_ref->ref_list);
        head_ref->processing = 0;
        head_ref->total_ref_mod = count_mod;
        head_ref->qgroup_reserved = 0;
        head_ref->qgroup_ref_root = 0;

        /* Record qgroup extent info if provided */
        if (qrecord) {
                if (ref_root && reserved) {
                        head_ref->qgroup_ref_root = ref_root;
                        head_ref->qgroup_reserved = reserved;
                }

                qrecord->bytenr = bytenr;
                qrecord->num_bytes = num_bytes;
                qrecord->old_roots = NULL;

                if (btrfs_qgroup_insert_dirty_extent_nolock(fs_info,
                                        delayed_refs, qrecord))
                        kfree(qrecord);
        }

        spin_lock_init(&head_ref->lock);
        mutex_init(&head_ref->mutex);

        trace_add_delayed_ref_head(fs_info, ref, head_ref, action);

        existing = htree_insert(&delayed_refs->href_root,
                                &head_ref->href_node);
        if (existing) {
                WARN_ON(ref_root && reserved && existing->qgroup_ref_root
                        && existing->qgroup_reserved);
                update_existing_head_ref(delayed_refs, &existing->node, ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
                head_ref = existing;
        } else {
                if (is_data && count_mod < 0)
                        delayed_refs->pending_csums += num_bytes;
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
                atomic_inc(&delayed_refs->num_entries);
                trans->delayed_ref_updates++;
        }
        return head_ref;
}

/*
 * helper to insert a delayed tree ref into the ref list of its head ref.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_head *head_ref,
                     struct btrfs_delayed_ref_node *ref, u64 bytenr,
                     u64 num_bytes, u64 parent, u64 ref_root, int level,
                     int action)
{
        struct btrfs_delayed_tree_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        u64 seq = 0;
        int ret;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        if (is_fstree(ref_root))
                seq = atomic64_read(&fs_info->tree_mod_seq);
        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;
        ref->seq = seq;

        full_ref = btrfs_delayed_node_to_tree_ref(ref);
        full_ref->parent = parent;
        full_ref->root = ref_root;
        if (parent)
                ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
        else
                ref->type = BTRFS_TREE_BLOCK_REF_KEY;
        full_ref->level = level;

        trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);

        ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

        /*
         * XXX: memory should be freed at the same level it was allocated.
         * That bad practice is pervasive here, so follow it for now; this
         * needs a cleanup.
         */
        if (ret > 0)
                kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
}

/*
 * helper to insert a delayed data ref into the ref list of its head ref.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_head *head_ref,
                     struct btrfs_delayed_ref_node *ref, u64 bytenr,
                     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
                     u64 offset, int action)
{
        struct btrfs_delayed_data_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        u64 seq = 0;
        int ret;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        delayed_refs = &trans->transaction->delayed_refs;

        if (is_fstree(ref_root))
                seq = atomic64_read(&fs_info->tree_mod_seq);

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;
        ref->seq = seq;

        full_ref = btrfs_delayed_node_to_data_ref(ref);
        full_ref->parent = parent;
        full_ref->root = ref_root;
        if (parent)
                ref->type = BTRFS_SHARED_DATA_REF_KEY;
        else
                ref->type = BTRFS_EXTENT_DATA_REF_KEY;

        full_ref->objectid = owner;
        full_ref->offset = offset;

        trace_add_delayed_data_ref(fs_info, ref, full_ref, action);

        ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

        if (ret > 0)
                kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 ref_root, int level, int action,
                               struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_tree_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *record = NULL;

        BUG_ON(extent_op && extent_op->is_data);
        ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref)
                goto free_ref;

        if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
            is_fstree(ref_root)) {
                record = kmalloc(sizeof(*record), GFP_NOFS);
                if (!record)
                        goto free_head_ref;
        }

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
                                        bytenr, num_bytes, 0, 0, action, 0);

        add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
                             num_bytes, parent, ref_root, level, action);
        spin_unlock(&delayed_refs->lock);

        return 0;

free_head_ref:
        kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
free_ref:
        kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

        return -ENOMEM;
}

/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes,
                               u64 parent, u64 ref_root,
                               u64 owner, u64 offset, u64 reserved, int action,
                               struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_data_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *record = NULL;

        BUG_ON(extent_op && !extent_op->is_data);
        ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
                kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                return -ENOMEM;
        }

        if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
            is_fstree(ref_root)) {
                record = kmalloc(sizeof(*record), GFP_NOFS);
                if (!record) {
                        kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                        kmem_cache_free(btrfs_delayed_ref_head_cachep,
                                        head_ref);
                        return -ENOMEM;
                }
        }

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
                                        bytenr, num_bytes, ref_root, reserved,
                                        action, 1);

        add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
                             num_bytes, parent, ref_root, owner, offset,
                             action);
        spin_unlock(&delayed_refs->lock);

        return 0;
}

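/*
 * Queue a delayed extent op (key and/or flags update) for an extent by
 * adding a head ref with the BTRFS_UPDATE_DELAYED_HEAD action.
 */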
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
                                struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref)
                return -ENOMEM;

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
                             num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
                             extent_op->is_data);

        spin_unlock(&delayed_refs->lock);
        return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}

void btrfs_delayed_ref_exit(void)
{
        kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
        kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
        kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
        kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int btrfs_delayed_ref_init(void)
{
        btrfs_delayed_ref_head_cachep = kmem_cache_create(
                                "btrfs_delayed_ref_head",
                                sizeof(struct btrfs_delayed_ref_head), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_ref_head_cachep)
                goto fail;

        btrfs_delayed_tree_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_tree_ref",
                                sizeof(struct btrfs_delayed_tree_ref), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_tree_ref_cachep)
                goto fail;

        btrfs_delayed_data_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_data_ref",
                                sizeof(struct btrfs_delayed_data_ref), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_data_ref_cachep)
                goto fail;

        btrfs_delayed_extent_op_cachep = kmem_cache_create(
                                "btrfs_delayed_extent_op",
                                sizeof(struct btrfs_delayed_extent_op), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_extent_op_cachep)
                goto fail;

        return 0;
fail:
        btrfs_delayed_ref_exit();
        return -ENOMEM;
}