Btrfs: fix joining same transaction handle more than twice
[cascardo/linux.git] fs/btrfs/extent-tree.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
21 #include <linux/blkdev.h>
22 #include <linux/sort.h>
23 #include <linux/rcupdate.h>
24 #include <linux/kthread.h>
25 #include <linux/slab.h>
26 #include <linux/ratelimit.h>
27 #include <linux/percpu_counter.h>
28 #include "hash.h"
29 #include "tree-log.h"
30 #include "disk-io.h"
31 #include "print-tree.h"
32 #include "volumes.h"
33 #include "raid56.h"
34 #include "locking.h"
35 #include "free-space-cache.h"
36 #include "math.h"
37 #include "sysfs.h"
38
39 #undef SCRAMBLE_DELAYED_REFS
40
41 /*
42  * control flags for do_chunk_alloc's force field
43  * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
44  * if we really need one.
45  *
46  * CHUNK_ALLOC_LIMITED means to only try to allocate one
47  * if we have very few chunks already allocated.  This is
48  * used as part of the clustering code to help make sure
49  * we have a good pool of storage to cluster in, without
50  * filling the FS with empty chunks.
51  *
52  * CHUNK_ALLOC_FORCE means it must try to allocate one.
53  *
54  */
55 enum {
56         CHUNK_ALLOC_NO_FORCE = 0,
57         CHUNK_ALLOC_LIMITED = 1,
58         CHUNK_ALLOC_FORCE = 2,
59 };
60
61 /*
62  * Control how reservations are dealt with.
63  *
64  * RESERVE_FREE - freeing a reservation.
65  * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
66  *   ENOSPC accounting
67  * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
68  *   bytes_may_use as the ENOSPC accounting is done elsewhere
69  */
70 enum {
71         RESERVE_FREE = 0,
72         RESERVE_ALLOC = 1,
73         RESERVE_ALLOC_NO_ACCOUNT = 2,
74 };
75
76 static int update_block_group(struct btrfs_root *root,
77                               u64 bytenr, u64 num_bytes, int alloc);
78 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
79                                 struct btrfs_root *root,
80                                 u64 bytenr, u64 num_bytes, u64 parent,
81                                 u64 root_objectid, u64 owner_objectid,
82                                 u64 owner_offset, int refs_to_drop,
83                                 struct btrfs_delayed_extent_op *extra_op);
84 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
85                                     struct extent_buffer *leaf,
86                                     struct btrfs_extent_item *ei);
87 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
88                                       struct btrfs_root *root,
89                                       u64 parent, u64 root_objectid,
90                                       u64 flags, u64 owner, u64 offset,
91                                       struct btrfs_key *ins, int ref_mod);
92 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
93                                      struct btrfs_root *root,
94                                      u64 parent, u64 root_objectid,
95                                      u64 flags, struct btrfs_disk_key *key,
96                                      int level, struct btrfs_key *ins);
97 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
98                           struct btrfs_root *extent_root, u64 flags,
99                           int force);
100 static int find_next_key(struct btrfs_path *path, int level,
101                          struct btrfs_key *key);
102 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
103                             int dump_block_groups);
104 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
105                                        u64 num_bytes, int reserve);
106 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
107                                u64 num_bytes);
108 int btrfs_pin_extent(struct btrfs_root *root,
109                      u64 bytenr, u64 num_bytes, int reserved);
110
111 static noinline int
112 block_group_cache_done(struct btrfs_block_group_cache *cache)
113 {
114         smp_mb();
115         return cache->cached == BTRFS_CACHE_FINISHED ||
116                 cache->cached == BTRFS_CACHE_ERROR;
117 }
118
119 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
120 {
121         return (cache->flags & bits) == bits;
122 }
123
124 static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
125 {
126         atomic_inc(&cache->count);
127 }
128
129 void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
130 {
131         if (atomic_dec_and_test(&cache->count)) {
132                 WARN_ON(cache->pinned > 0);
133                 WARN_ON(cache->reserved > 0);
134                 kfree(cache->free_space_ctl);
135                 kfree(cache);
136         }
137 }
138
139 /*
140  * this adds the block group to the fs_info rb tree for the block group
141  * cache
142  */
143 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
144                                 struct btrfs_block_group_cache *block_group)
145 {
146         struct rb_node **p;
147         struct rb_node *parent = NULL;
148         struct btrfs_block_group_cache *cache;
149
150         spin_lock(&info->block_group_cache_lock);
151         p = &info->block_group_cache_tree.rb_node;
152
153         while (*p) {
154                 parent = *p;
155                 cache = rb_entry(parent, struct btrfs_block_group_cache,
156                                  cache_node);
157                 if (block_group->key.objectid < cache->key.objectid) {
158                         p = &(*p)->rb_left;
159                 } else if (block_group->key.objectid > cache->key.objectid) {
160                         p = &(*p)->rb_right;
161                 } else {
162                         spin_unlock(&info->block_group_cache_lock);
163                         return -EEXIST;
164                 }
165         }
166
167         rb_link_node(&block_group->cache_node, parent, p);
168         rb_insert_color(&block_group->cache_node,
169                         &info->block_group_cache_tree);
170
171         if (info->first_logical_byte > block_group->key.objectid)
172                 info->first_logical_byte = block_group->key.objectid;
173
174         spin_unlock(&info->block_group_cache_lock);
175
176         return 0;
177 }
178
179 /*
180  * This will return the block group at or after bytenr if contains is 0, else
181  * it will return the block group that contains the bytenr
182  */
183 static struct btrfs_block_group_cache *
184 block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
185                               int contains)
186 {
187         struct btrfs_block_group_cache *cache, *ret = NULL;
188         struct rb_node *n;
189         u64 end, start;
190
191         spin_lock(&info->block_group_cache_lock);
192         n = info->block_group_cache_tree.rb_node;
193
194         while (n) {
195                 cache = rb_entry(n, struct btrfs_block_group_cache,
196                                  cache_node);
197                 end = cache->key.objectid + cache->key.offset - 1;
198                 start = cache->key.objectid;
199
200                 if (bytenr < start) {
201                         if (!contains && (!ret || start < ret->key.objectid))
202                                 ret = cache;
203                         n = n->rb_left;
204                 } else if (bytenr > start) {
205                         if (contains && bytenr <= end) {
206                                 ret = cache;
207                                 break;
208                         }
209                         n = n->rb_right;
210                 } else {
211                         ret = cache;
212                         break;
213                 }
214         }
215         if (ret) {
216                 btrfs_get_block_group(ret);
217                 if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
218                         info->first_logical_byte = ret->key.objectid;
219         }
220         spin_unlock(&info->block_group_cache_lock);
221
222         return ret;
223 }
224
225 static int add_excluded_extent(struct btrfs_root *root,
226                                u64 start, u64 num_bytes)
227 {
228         u64 end = start + num_bytes - 1;
229         set_extent_bits(&root->fs_info->freed_extents[0],
230                         start, end, EXTENT_UPTODATE, GFP_NOFS);
231         set_extent_bits(&root->fs_info->freed_extents[1],
232                         start, end, EXTENT_UPTODATE, GFP_NOFS);
233         return 0;
234 }
235
236 static void free_excluded_extents(struct btrfs_root *root,
237                                   struct btrfs_block_group_cache *cache)
238 {
239         u64 start, end;
240
241         start = cache->key.objectid;
242         end = start + cache->key.offset - 1;
243
244         clear_extent_bits(&root->fs_info->freed_extents[0],
245                           start, end, EXTENT_UPTODATE, GFP_NOFS);
246         clear_extent_bits(&root->fs_info->freed_extents[1],
247                           start, end, EXTENT_UPTODATE, GFP_NOFS);
248 }
249
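/*
 * Exclude the space used by superblock mirrors (and anything below
 * BTRFS_SUPER_INFO_OFFSET) that falls inside this block group, so the
 * free space caching code never hands it out as allocatable space, and
 * account it in cache->bytes_super.
 */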
250 static int exclude_super_stripes(struct btrfs_root *root,
251                                  struct btrfs_block_group_cache *cache)
252 {
253         u64 bytenr;
254         u64 *logical;
255         int stripe_len;
256         int i, nr, ret;
257
258         if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
259                 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
260                 cache->bytes_super += stripe_len;
261                 ret = add_excluded_extent(root, cache->key.objectid,
262                                           stripe_len);
263                 if (ret)
264                         return ret;
265         }
266
267         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
268                 bytenr = btrfs_sb_offset(i);
269                 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
270                                        cache->key.objectid, bytenr,
271                                        0, &logical, &nr, &stripe_len);
272                 if (ret)
273                         return ret;
274
275                 while (nr--) {
276                         u64 start, len;
277
278                         if (logical[nr] > cache->key.objectid +
279                             cache->key.offset)
280                                 continue;
281
282                         if (logical[nr] + stripe_len <= cache->key.objectid)
283                                 continue;
284
285                         start = logical[nr];
286                         if (start < cache->key.objectid) {
287                                 start = cache->key.objectid;
288                                 len = (logical[nr] + stripe_len) - start;
289                         } else {
290                                 len = min_t(u64, stripe_len,
291                                             cache->key.objectid +
292                                             cache->key.offset - start);
293                         }
294
295                         cache->bytes_super += len;
296                         ret = add_excluded_extent(root, start, len);
297                         if (ret) {
298                                 kfree(logical);
299                                 return ret;
300                         }
301                 }
302
303                 kfree(logical);
304         }
305         return 0;
306 }
307
308 static struct btrfs_caching_control *
309 get_caching_control(struct btrfs_block_group_cache *cache)
310 {
311         struct btrfs_caching_control *ctl;
312
313         spin_lock(&cache->lock);
314         if (cache->cached != BTRFS_CACHE_STARTED) {
315                 spin_unlock(&cache->lock);
316                 return NULL;
317         }
318
319         /* We're loading it the fast way, so we don't have a caching_ctl. */
320         if (!cache->caching_ctl) {
321                 spin_unlock(&cache->lock);
322                 return NULL;
323         }
324
325         ctl = cache->caching_ctl;
326         atomic_inc(&ctl->count);
327         spin_unlock(&cache->lock);
328         return ctl;
329 }
330
331 static void put_caching_control(struct btrfs_caching_control *ctl)
332 {
333         if (atomic_dec_and_test(&ctl->count))
334                 kfree(ctl);
335 }
336
337 /*
338  * this is only called by cache_block_group; since we could have freed extents,
339  * we need to check the pinned_extents for any extents that can't be used yet,
340  * since their free space will be released as soon as the transaction commits.
341  */
342 static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
343                               struct btrfs_fs_info *info, u64 start, u64 end)
344 {
345         u64 extent_start, extent_end, size, total_added = 0;
346         int ret;
347
348         while (start < end) {
349                 ret = find_first_extent_bit(info->pinned_extents, start,
350                                             &extent_start, &extent_end,
351                                             EXTENT_DIRTY | EXTENT_UPTODATE,
352                                             NULL);
353                 if (ret)
354                         break;
355
356                 if (extent_start <= start) {
357                         start = extent_end + 1;
358                 } else if (extent_start > start && extent_start < end) {
359                         size = extent_start - start;
360                         total_added += size;
361                         ret = btrfs_add_free_space(block_group, start,
362                                                    size);
363                         BUG_ON(ret); /* -ENOMEM or logic error */
364                         start = extent_end + 1;
365                 } else {
366                         break;
367                 }
368         }
369
370         if (start < end) {
371                 size = end - start;
372                 total_added += size;
373                 ret = btrfs_add_free_space(block_group, start, size);
374                 BUG_ON(ret); /* -ENOMEM or logic error */
375         }
376
377         return total_added;
378 }
379
380 static noinline void caching_thread(struct btrfs_work *work)
381 {
382         struct btrfs_block_group_cache *block_group;
383         struct btrfs_fs_info *fs_info;
384         struct btrfs_caching_control *caching_ctl;
385         struct btrfs_root *extent_root;
386         struct btrfs_path *path;
387         struct extent_buffer *leaf;
388         struct btrfs_key key;
389         u64 total_found = 0;
390         u64 last = 0;
391         u32 nritems;
392         int ret = -ENOMEM;
393
394         caching_ctl = container_of(work, struct btrfs_caching_control, work);
395         block_group = caching_ctl->block_group;
396         fs_info = block_group->fs_info;
397         extent_root = fs_info->extent_root;
398
399         path = btrfs_alloc_path();
400         if (!path)
401                 goto out;
402
403         last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
404
405         /*
406          * We don't want to deadlock with somebody trying to allocate a new
407          * extent for the extent root while also trying to search the extent
408          * root to add free space.  So we skip locking and search the commit
409          * root, since it's read-only.
410          */
411         path->skip_locking = 1;
412         path->search_commit_root = 1;
413         path->reada = 1;
414
415         key.objectid = last;
416         key.offset = 0;
417         key.type = BTRFS_EXTENT_ITEM_KEY;
418 again:
419         mutex_lock(&caching_ctl->mutex);
420         /* need to make sure the commit_root doesn't disappear */
421         down_read(&fs_info->commit_root_sem);
422
423 next:
424         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
425         if (ret < 0)
426                 goto err;
427
428         leaf = path->nodes[0];
429         nritems = btrfs_header_nritems(leaf);
430
431         while (1) {
432                 if (btrfs_fs_closing(fs_info) > 1) {
433                         last = (u64)-1;
434                         break;
435                 }
436
437                 if (path->slots[0] < nritems) {
438                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
439                 } else {
440                         ret = find_next_key(path, 0, &key);
441                         if (ret)
442                                 break;
443
444                         if (need_resched() ||
445                             rwsem_is_contended(&fs_info->commit_root_sem)) {
446                                 caching_ctl->progress = last;
447                                 btrfs_release_path(path);
448                                 up_read(&fs_info->commit_root_sem);
449                                 mutex_unlock(&caching_ctl->mutex);
450                                 cond_resched();
451                                 goto again;
452                         }
453
454                         ret = btrfs_next_leaf(extent_root, path);
455                         if (ret < 0)
456                                 goto err;
457                         if (ret)
458                                 break;
459                         leaf = path->nodes[0];
460                         nritems = btrfs_header_nritems(leaf);
461                         continue;
462                 }
463
464                 if (key.objectid < last) {
465                         key.objectid = last;
466                         key.offset = 0;
467                         key.type = BTRFS_EXTENT_ITEM_KEY;
468
469                         caching_ctl->progress = last;
470                         btrfs_release_path(path);
471                         goto next;
472                 }
473
474                 if (key.objectid < block_group->key.objectid) {
475                         path->slots[0]++;
476                         continue;
477                 }
478
479                 if (key.objectid >= block_group->key.objectid +
480                     block_group->key.offset)
481                         break;
482
483                 if (key.type == BTRFS_EXTENT_ITEM_KEY ||
484                     key.type == BTRFS_METADATA_ITEM_KEY) {
485                         total_found += add_new_free_space(block_group,
486                                                           fs_info, last,
487                                                           key.objectid);
488                         if (key.type == BTRFS_METADATA_ITEM_KEY)
489                                 last = key.objectid +
490                                         fs_info->tree_root->leafsize;
491                         else
492                                 last = key.objectid + key.offset;
493
494                         if (total_found > (1024 * 1024 * 2)) {
495                                 total_found = 0;
496                                 wake_up(&caching_ctl->wait);
497                         }
498                 }
499                 path->slots[0]++;
500         }
501         ret = 0;
502
503         total_found += add_new_free_space(block_group, fs_info, last,
504                                           block_group->key.objectid +
505                                           block_group->key.offset);
506         caching_ctl->progress = (u64)-1;
507
508         spin_lock(&block_group->lock);
509         block_group->caching_ctl = NULL;
510         block_group->cached = BTRFS_CACHE_FINISHED;
511         spin_unlock(&block_group->lock);
512
513 err:
514         btrfs_free_path(path);
515         up_read(&fs_info->commit_root_sem);
516
517         free_excluded_extents(extent_root, block_group);
518
519         mutex_unlock(&caching_ctl->mutex);
520 out:
521         if (ret) {
522                 spin_lock(&block_group->lock);
523                 block_group->caching_ctl = NULL;
524                 block_group->cached = BTRFS_CACHE_ERROR;
525                 spin_unlock(&block_group->lock);
526         }
527         wake_up(&caching_ctl->wait);
528
529         put_caching_control(caching_ctl);
530         btrfs_put_block_group(block_group);
531 }
532
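/*
 * Begin loading the free space information for a block group, either from
 * the on-disk free space cache (the fast path) or, failing that, by
 * scanning the extent tree from a background caching worker.  With
 * load_cache_only set, only the fast path is attempted and no caching
 * thread is queued.
 */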
533 static int cache_block_group(struct btrfs_block_group_cache *cache,
534                              int load_cache_only)
535 {
536         DEFINE_WAIT(wait);
537         struct btrfs_fs_info *fs_info = cache->fs_info;
538         struct btrfs_caching_control *caching_ctl;
539         int ret = 0;
540
541         caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
542         if (!caching_ctl)
543                 return -ENOMEM;
544
545         INIT_LIST_HEAD(&caching_ctl->list);
546         mutex_init(&caching_ctl->mutex);
547         init_waitqueue_head(&caching_ctl->wait);
548         caching_ctl->block_group = cache;
549         caching_ctl->progress = cache->key.objectid;
550         atomic_set(&caching_ctl->count, 1);
551         btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);
552
553         spin_lock(&cache->lock);
554         /*
555          * This should be a rare occasion, but I think this could happen in the
556          * case where one thread starts to load the space cache info, and then
557          * some other thread starts a transaction commit which tries to do an
558          * allocation while the other thread is still loading the space cache
559          * info.  The previous loop should have kept us from choosing this block
560          * group, but if we've moved to the state where we will wait on caching
561          * block groups, we need to first check if we're doing a fast load here,
562          * so we can wait for it to finish, otherwise we could end up allocating
563          * from a block group whose cache gets evicted for one reason or
564          * another.
565          */
566         while (cache->cached == BTRFS_CACHE_FAST) {
567                 struct btrfs_caching_control *ctl;
568
569                 ctl = cache->caching_ctl;
570                 atomic_inc(&ctl->count);
571                 prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
572                 spin_unlock(&cache->lock);
573
574                 schedule();
575
576                 finish_wait(&ctl->wait, &wait);
577                 put_caching_control(ctl);
578                 spin_lock(&cache->lock);
579         }
580
581         if (cache->cached != BTRFS_CACHE_NO) {
582                 spin_unlock(&cache->lock);
583                 kfree(caching_ctl);
584                 return 0;
585         }
586         WARN_ON(cache->caching_ctl);
587         cache->caching_ctl = caching_ctl;
588         cache->cached = BTRFS_CACHE_FAST;
589         spin_unlock(&cache->lock);
590
591         if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
592                 ret = load_free_space_cache(fs_info, cache);
593
594                 spin_lock(&cache->lock);
595                 if (ret == 1) {
596                         cache->caching_ctl = NULL;
597                         cache->cached = BTRFS_CACHE_FINISHED;
598                         cache->last_byte_to_unpin = (u64)-1;
599                 } else {
600                         if (load_cache_only) {
601                                 cache->caching_ctl = NULL;
602                                 cache->cached = BTRFS_CACHE_NO;
603                         } else {
604                                 cache->cached = BTRFS_CACHE_STARTED;
605                         }
606                 }
607                 spin_unlock(&cache->lock);
608                 wake_up(&caching_ctl->wait);
609                 if (ret == 1) {
610                         put_caching_control(caching_ctl);
611                         free_excluded_extents(fs_info->extent_root, cache);
612                         return 0;
613                 }
614         } else {
615                 /*
616                  * We are not going to do the fast caching, set cached to the
617                  * appropriate value and wake up any waiters.
618                  */
619                 spin_lock(&cache->lock);
620                 if (load_cache_only) {
621                         cache->caching_ctl = NULL;
622                         cache->cached = BTRFS_CACHE_NO;
623                 } else {
624                         cache->cached = BTRFS_CACHE_STARTED;
625                 }
626                 spin_unlock(&cache->lock);
627                 wake_up(&caching_ctl->wait);
628         }
629
630         if (load_cache_only) {
631                 put_caching_control(caching_ctl);
632                 return 0;
633         }
634
635         down_write(&fs_info->commit_root_sem);
636         atomic_inc(&caching_ctl->count);
637         list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
638         up_write(&fs_info->commit_root_sem);
639
640         btrfs_get_block_group(cache);
641
642         btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
643
644         return ret;
645 }
646
647 /*
648  * return the block group that starts at or after bytenr
649  */
650 static struct btrfs_block_group_cache *
651 btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
652 {
653         struct btrfs_block_group_cache *cache;
654
655         cache = block_group_cache_tree_search(info, bytenr, 0);
656
657         return cache;
658 }
659
660 /*
661  * return the block group that contains the given bytenr
662  */
663 struct btrfs_block_group_cache *btrfs_lookup_block_group(
664                                                  struct btrfs_fs_info *info,
665                                                  u64 bytenr)
666 {
667         struct btrfs_block_group_cache *cache;
668
669         cache = block_group_cache_tree_search(info, bytenr, 1);
670
671         return cache;
672 }
673
674 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
675                                                   u64 flags)
676 {
677         struct list_head *head = &info->space_info;
678         struct btrfs_space_info *found;
679
680         flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
681
682         rcu_read_lock();
683         list_for_each_entry_rcu(found, head, list) {
684                 if (found->flags & flags) {
685                         rcu_read_unlock();
686                         return found;
687                 }
688         }
689         rcu_read_unlock();
690         return NULL;
691 }
692
693 /*
694  * after adding space to the filesystem, we need to clear the full flags
695  * on all the space infos.
696  */
697 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
698 {
699         struct list_head *head = &info->space_info;
700         struct btrfs_space_info *found;
701
702         rcu_read_lock();
703         list_for_each_entry_rcu(found, head, list)
704                 found->full = 0;
705         rcu_read_unlock();
706 }
707
708 /* simple helper to search for an existing extent at a given offset */
709 int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
710 {
711         int ret;
712         struct btrfs_key key;
713         struct btrfs_path *path;
714
715         path = btrfs_alloc_path();
716         if (!path)
717                 return -ENOMEM;
718
719         key.objectid = start;
720         key.offset = len;
721         key.type = BTRFS_EXTENT_ITEM_KEY;
722         ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
723                                 0, 0);
724         if (ret > 0) {
725                 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
726                 if (key.objectid == start &&
727                     key.type == BTRFS_METADATA_ITEM_KEY)
728                         ret = 0;
729         }
730         btrfs_free_path(path);
731         return ret;
732 }
733
734 /*
735  * helper function to lookup reference count and flags of a tree block.
736  *
737  * the head node for a delayed ref is used to store the sum of all the
738  * reference count modifications queued up in the rbtree. The head
739  * node may also store the extent flags to set. This way you can check
740  * to see what the reference count and extent flags would be once all of
741  * the delayed refs have been processed.
742  */
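/*
 * For example (illustrative numbers only): if the extent item on disk
 * records refs == 2 and the delayed ref head queued in this transaction
 * carries a ref_mod of +1, this helper reports refs == 3, the value the
 * extent tree will hold once the delayed refs have run.
 */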
743 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
744                              struct btrfs_root *root, u64 bytenr,
745                              u64 offset, int metadata, u64 *refs, u64 *flags)
746 {
747         struct btrfs_delayed_ref_head *head;
748         struct btrfs_delayed_ref_root *delayed_refs;
749         struct btrfs_path *path;
750         struct btrfs_extent_item *ei;
751         struct extent_buffer *leaf;
752         struct btrfs_key key;
753         u32 item_size;
754         u64 num_refs;
755         u64 extent_flags;
756         int ret;
757
758         /*
759          * If we don't have skinny metadata, don't bother doing anything
760          * different
761          */
762         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
763                 offset = root->leafsize;
764                 metadata = 0;
765         }
766
767         path = btrfs_alloc_path();
768         if (!path)
769                 return -ENOMEM;
770
771         if (!trans) {
772                 path->skip_locking = 1;
773                 path->search_commit_root = 1;
774         }
775
776 search_again:
777         key.objectid = bytenr;
778         key.offset = offset;
779         if (metadata)
780                 key.type = BTRFS_METADATA_ITEM_KEY;
781         else
782                 key.type = BTRFS_EXTENT_ITEM_KEY;
783
784 again:
785         ret = btrfs_search_slot(trans, root->fs_info->extent_root,
786                                 &key, path, 0, 0);
787         if (ret < 0)
788                 goto out_free;
789
790         if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
791                 if (path->slots[0]) {
792                         path->slots[0]--;
793                         btrfs_item_key_to_cpu(path->nodes[0], &key,
794                                               path->slots[0]);
795                         if (key.objectid == bytenr &&
796                             key.type == BTRFS_EXTENT_ITEM_KEY &&
797                             key.offset == root->leafsize)
798                                 ret = 0;
799                 }
800                 if (ret) {
801                         key.objectid = bytenr;
802                         key.type = BTRFS_EXTENT_ITEM_KEY;
803                         key.offset = root->leafsize;
804                         btrfs_release_path(path);
805                         goto again;
806                 }
807         }
808
809         if (ret == 0) {
810                 leaf = path->nodes[0];
811                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
812                 if (item_size >= sizeof(*ei)) {
813                         ei = btrfs_item_ptr(leaf, path->slots[0],
814                                             struct btrfs_extent_item);
815                         num_refs = btrfs_extent_refs(leaf, ei);
816                         extent_flags = btrfs_extent_flags(leaf, ei);
817                 } else {
818 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
819                         struct btrfs_extent_item_v0 *ei0;
820                         BUG_ON(item_size != sizeof(*ei0));
821                         ei0 = btrfs_item_ptr(leaf, path->slots[0],
822                                              struct btrfs_extent_item_v0);
823                         num_refs = btrfs_extent_refs_v0(leaf, ei0);
824                         /* FIXME: this isn't correct for data */
825                         extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
826 #else
827                         BUG();
828 #endif
829                 }
830                 BUG_ON(num_refs == 0);
831         } else {
832                 num_refs = 0;
833                 extent_flags = 0;
834                 ret = 0;
835         }
836
837         if (!trans)
838                 goto out;
839
840         delayed_refs = &trans->transaction->delayed_refs;
841         spin_lock(&delayed_refs->lock);
842         head = btrfs_find_delayed_ref_head(trans, bytenr);
843         if (head) {
844                 if (!mutex_trylock(&head->mutex)) {
845                         atomic_inc(&head->node.refs);
846                         spin_unlock(&delayed_refs->lock);
847
848                         btrfs_release_path(path);
849
850                         /*
851                          * Mutex was contended, block until it's released and try
852                          * again
853                          */
854                         mutex_lock(&head->mutex);
855                         mutex_unlock(&head->mutex);
856                         btrfs_put_delayed_ref(&head->node);
857                         goto search_again;
858                 }
859                 spin_lock(&head->lock);
860                 if (head->extent_op && head->extent_op->update_flags)
861                         extent_flags |= head->extent_op->flags_to_set;
862                 else
863                         BUG_ON(num_refs == 0);
864
865                 num_refs += head->node.ref_mod;
866                 spin_unlock(&head->lock);
867                 mutex_unlock(&head->mutex);
868         }
869         spin_unlock(&delayed_refs->lock);
870 out:
871         WARN_ON(num_refs == 0);
872         if (refs)
873                 *refs = num_refs;
874         if (flags)
875                 *flags = extent_flags;
876 out_free:
877         btrfs_free_path(path);
878         return ret;
879 }
880
881 /*
882  * Back reference rules.  Back refs have three main goals:
883  *
884  * 1) differentiate between all holders of references to an extent so that
885  *    when a reference is dropped we can make sure it was a valid reference
886  *    before freeing the extent.
887  *
888  * 2) Provide enough information to quickly find the holders of an extent
889  *    if we notice a given block is corrupted or bad.
890  *
891  * 3) Make it easy to migrate blocks for FS shrinking or storage pool
892  *    maintenance.  This is actually the same as #2, but with a slightly
893  *    different use case.
894  *
895  * There are two kinds of back refs. The implicit back refs is optimized
896  * for pointers in non-shared tree blocks. For a given pointer in a block,
897  * back refs of this kind provide information about the block's owner tree
898  * and the pointer's key. This information allows us to find the block by
899  * b-tree searching. The full back refs is for pointers in tree blocks not
900  * referenced by their owner trees. The location of tree block is recorded
901  * in the back refs. Actually the full back refs is generic, and can be
902  * used in all cases the implicit back refs is used. The major shortcoming
903  * of the full back refs is its overhead. Every time a tree block gets
904  * COWed, we have to update back refs entry for all pointers in it.
905  *
906  * For a newly allocated tree block, we use implicit back refs for
907  * pointers in it. This means most tree related operations only involve
908  * implicit back refs. For a tree block created in old transaction, the
909  * only way to drop a reference to it is to COW it. So we can detect the
910  * event that tree block loses its owner tree's reference and do the
911  * back refs conversion.
912  *
913  * When a tree block is COW'd through a tree, there are four cases:
914  *
915  * The reference count of the block is one and the tree is the block's
916  * owner tree. Nothing to do in this case.
917  *
918  * The reference count of the block is one and the tree is not the
919  * block's owner tree. In this case, full back refs is used for pointers
920  * in the block. Remove these full back refs, add implicit back refs for
921  * every pointer in the new block.
922  *
923  * The reference count of the block is greater than one and the tree is
924  * the block's owner tree. In this case, implicit back refs is used for
925  * pointers in the block. Add full back refs for every pointer in the
926  * block, increase lower level extents' reference counts. The original
927  * implicit back refs are carried over to the new block.
928  *
929  * The reference count of the block is greater than one and the tree is
930  * not the block's owner tree. Add implicit back refs for every pointer in
931  * the new block, increase lower level extents' reference count.
932  *
933  * Back reference key composition:
934  *
935  * The key objectid corresponds to the first byte in the extent,
936  * The key type is used to differentiate between types of back refs.
937  * There are different meanings of the key offset for different types
938  * of back refs.
939  *
940  * File extents can be referenced by:
941  *
942  * - multiple snapshots, subvolumes, or different generations in one subvol
943  * - different files inside a single subvolume
944  * - different offsets inside a file (bookend extents in file.c)
945  *
946  * The extent ref structure for the implicit back refs has fields for:
947  *
948  * - Objectid of the subvolume root
949  * - objectid of the file holding the reference
950  * - original offset in the file
951  * - how many bookend extents
952  *
953  * The key offset for the implicit back refs is a hash of the first
954  * three fields.
955  *
956  * The extent ref structure for the full back refs has a field for:
957  *
958  * - number of pointers in the tree leaf
959  *
960  * The key offset for the full back refs is the first byte of
961  * the tree leaf.
962  *
963  * When a file extent is allocated, the implicit back refs are used.
964  * The fields are filled in:
965  *
966  *     (root_key.objectid, inode objectid, offset in file, 1)
967  *
968  * When a file extent is removed during file truncation, we find the
969  * corresponding implicit back refs and check the following fields:
970  *
971  *     (btrfs_header_owner(leaf), inode objectid, offset in file)
972  *
973  * Btree extents can be referenced by:
974  *
975  * - Different subvolumes
976  *
977  * Both the implicit back refs and the full back refs for tree blocks
978  * consist only of the key. The key offset for the implicit back refs is
979  * objectid of block's owner tree. The key offset for the full back refs
980  * is the first byte of parent block.
981  *
982  * When implicit back refs are used, information about the lowest key and
983  * the level of the tree block is required. This information is stored in
984  * the tree block info structure.
985  */
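/*
 * A worked example of the key composition above (all values made up): a
 * data extent at bytenr 12582912, referenced by inode 257 at file offset 0
 * in the fs tree (root objectid 5), gets an implicit back ref keyed as
 *
 *     (12582912, BTRFS_EXTENT_DATA_REF_KEY, hash(5, 257, 0))
 *
 * whereas the same extent referenced through a shared parent tree block at
 * bytenr 30408704 gets a full back ref keyed as
 *
 *     (12582912, BTRFS_SHARED_DATA_REF_KEY, 30408704)
 */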
986
987 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
988 static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
989                                   struct btrfs_root *root,
990                                   struct btrfs_path *path,
991                                   u64 owner, u32 extra_size)
992 {
993         struct btrfs_extent_item *item;
994         struct btrfs_extent_item_v0 *ei0;
995         struct btrfs_extent_ref_v0 *ref0;
996         struct btrfs_tree_block_info *bi;
997         struct extent_buffer *leaf;
998         struct btrfs_key key;
999         struct btrfs_key found_key;
1000         u32 new_size = sizeof(*item);
1001         u64 refs;
1002         int ret;
1003
1004         leaf = path->nodes[0];
1005         BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
1006
1007         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1008         ei0 = btrfs_item_ptr(leaf, path->slots[0],
1009                              struct btrfs_extent_item_v0);
1010         refs = btrfs_extent_refs_v0(leaf, ei0);
1011
1012         if (owner == (u64)-1) {
1013                 while (1) {
1014                         if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1015                                 ret = btrfs_next_leaf(root, path);
1016                                 if (ret < 0)
1017                                         return ret;
1018                                 BUG_ON(ret > 0); /* Corruption */
1019                                 leaf = path->nodes[0];
1020                         }
1021                         btrfs_item_key_to_cpu(leaf, &found_key,
1022                                               path->slots[0]);
1023                         BUG_ON(key.objectid != found_key.objectid);
1024                         if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
1025                                 path->slots[0]++;
1026                                 continue;
1027                         }
1028                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1029                                               struct btrfs_extent_ref_v0);
1030                         owner = btrfs_ref_objectid_v0(leaf, ref0);
1031                         break;
1032                 }
1033         }
1034         btrfs_release_path(path);
1035
1036         if (owner < BTRFS_FIRST_FREE_OBJECTID)
1037                 new_size += sizeof(*bi);
1038
1039         new_size -= sizeof(*ei0);
1040         ret = btrfs_search_slot(trans, root, &key, path,
1041                                 new_size + extra_size, 1);
1042         if (ret < 0)
1043                 return ret;
1044         BUG_ON(ret); /* Corruption */
1045
1046         btrfs_extend_item(root, path, new_size);
1047
1048         leaf = path->nodes[0];
1049         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1050         btrfs_set_extent_refs(leaf, item, refs);
1051         /* FIXME: get real generation */
1052         btrfs_set_extent_generation(leaf, item, 0);
1053         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1054                 btrfs_set_extent_flags(leaf, item,
1055                                        BTRFS_EXTENT_FLAG_TREE_BLOCK |
1056                                        BTRFS_BLOCK_FLAG_FULL_BACKREF);
1057                 bi = (struct btrfs_tree_block_info *)(item + 1);
1058                 /* FIXME: get first key of the block */
1059                 memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
1060                 btrfs_set_tree_block_level(leaf, bi, (int)owner);
1061         } else {
1062                 btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
1063         }
1064         btrfs_mark_buffer_dirty(leaf);
1065         return 0;
1066 }
1067 #endif
1068
1069 static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
1070 {
1071         u32 high_crc = ~(u32)0;
1072         u32 low_crc = ~(u32)0;
1073         __le64 lenum;
1074
1075         lenum = cpu_to_le64(root_objectid);
1076         high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
1077         lenum = cpu_to_le64(owner);
1078         low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1079         lenum = cpu_to_le64(offset);
1080         low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1081
1082         return ((u64)high_crc << 31) ^ (u64)low_crc;
1083 }
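/*
 * Example (values are illustrative only): a ref from the fs tree (root
 * objectid 5) for inode 257 at file offset 0 uses
 * hash_extent_data_ref(5, 257, 0) as the key offset of its
 * EXTENT_DATA_REF item.  Hash collisions are possible; the lookup and
 * insert helpers below resolve them by walking forward until the
 * (root, objectid, offset) triple actually matches.
 */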
1084
1085 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1086                                      struct btrfs_extent_data_ref *ref)
1087 {
1088         return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1089                                     btrfs_extent_data_ref_objectid(leaf, ref),
1090                                     btrfs_extent_data_ref_offset(leaf, ref));
1091 }
1092
1093 static int match_extent_data_ref(struct extent_buffer *leaf,
1094                                  struct btrfs_extent_data_ref *ref,
1095                                  u64 root_objectid, u64 owner, u64 offset)
1096 {
1097         if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1098             btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1099             btrfs_extent_data_ref_offset(leaf, ref) != offset)
1100                 return 0;
1101         return 1;
1102 }
1103
1104 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1105                                            struct btrfs_root *root,
1106                                            struct btrfs_path *path,
1107                                            u64 bytenr, u64 parent,
1108                                            u64 root_objectid,
1109                                            u64 owner, u64 offset)
1110 {
1111         struct btrfs_key key;
1112         struct btrfs_extent_data_ref *ref;
1113         struct extent_buffer *leaf;
1114         u32 nritems;
1115         int ret;
1116         int recow;
1117         int err = -ENOENT;
1118
1119         key.objectid = bytenr;
1120         if (parent) {
1121                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1122                 key.offset = parent;
1123         } else {
1124                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1125                 key.offset = hash_extent_data_ref(root_objectid,
1126                                                   owner, offset);
1127         }
1128 again:
1129         recow = 0;
1130         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1131         if (ret < 0) {
1132                 err = ret;
1133                 goto fail;
1134         }
1135
1136         if (parent) {
1137                 if (!ret)
1138                         return 0;
1139 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1140                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1141                 btrfs_release_path(path);
1142                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1143                 if (ret < 0) {
1144                         err = ret;
1145                         goto fail;
1146                 }
1147                 if (!ret)
1148                         return 0;
1149 #endif
1150                 goto fail;
1151         }
1152
1153         leaf = path->nodes[0];
1154         nritems = btrfs_header_nritems(leaf);
1155         while (1) {
1156                 if (path->slots[0] >= nritems) {
1157                         ret = btrfs_next_leaf(root, path);
1158                         if (ret < 0)
1159                                 err = ret;
1160                         if (ret)
1161                                 goto fail;
1162
1163                         leaf = path->nodes[0];
1164                         nritems = btrfs_header_nritems(leaf);
1165                         recow = 1;
1166                 }
1167
1168                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1169                 if (key.objectid != bytenr ||
1170                     key.type != BTRFS_EXTENT_DATA_REF_KEY)
1171                         goto fail;
1172
1173                 ref = btrfs_item_ptr(leaf, path->slots[0],
1174                                      struct btrfs_extent_data_ref);
1175
1176                 if (match_extent_data_ref(leaf, ref, root_objectid,
1177                                           owner, offset)) {
1178                         if (recow) {
1179                                 btrfs_release_path(path);
1180                                 goto again;
1181                         }
1182                         err = 0;
1183                         break;
1184                 }
1185                 path->slots[0]++;
1186         }
1187 fail:
1188         return err;
1189 }
1190
1191 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1192                                            struct btrfs_root *root,
1193                                            struct btrfs_path *path,
1194                                            u64 bytenr, u64 parent,
1195                                            u64 root_objectid, u64 owner,
1196                                            u64 offset, int refs_to_add)
1197 {
1198         struct btrfs_key key;
1199         struct extent_buffer *leaf;
1200         u32 size;
1201         u32 num_refs;
1202         int ret;
1203
1204         key.objectid = bytenr;
1205         if (parent) {
1206                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1207                 key.offset = parent;
1208                 size = sizeof(struct btrfs_shared_data_ref);
1209         } else {
1210                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1211                 key.offset = hash_extent_data_ref(root_objectid,
1212                                                   owner, offset);
1213                 size = sizeof(struct btrfs_extent_data_ref);
1214         }
1215
1216         ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1217         if (ret && ret != -EEXIST)
1218                 goto fail;
1219
1220         leaf = path->nodes[0];
1221         if (parent) {
1222                 struct btrfs_shared_data_ref *ref;
1223                 ref = btrfs_item_ptr(leaf, path->slots[0],
1224                                      struct btrfs_shared_data_ref);
1225                 if (ret == 0) {
1226                         btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1227                 } else {
1228                         num_refs = btrfs_shared_data_ref_count(leaf, ref);
1229                         num_refs += refs_to_add;
1230                         btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1231                 }
1232         } else {
1233                 struct btrfs_extent_data_ref *ref;
1234                 while (ret == -EEXIST) {
1235                         ref = btrfs_item_ptr(leaf, path->slots[0],
1236                                              struct btrfs_extent_data_ref);
1237                         if (match_extent_data_ref(leaf, ref, root_objectid,
1238                                                   owner, offset))
1239                                 break;
1240                         btrfs_release_path(path);
1241                         key.offset++;
1242                         ret = btrfs_insert_empty_item(trans, root, path, &key,
1243                                                       size);
1244                         if (ret && ret != -EEXIST)
1245                                 goto fail;
1246
1247                         leaf = path->nodes[0];
1248                 }
1249                 ref = btrfs_item_ptr(leaf, path->slots[0],
1250                                      struct btrfs_extent_data_ref);
1251                 if (ret == 0) {
1252                         btrfs_set_extent_data_ref_root(leaf, ref,
1253                                                        root_objectid);
1254                         btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1255                         btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1256                         btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1257                 } else {
1258                         num_refs = btrfs_extent_data_ref_count(leaf, ref);
1259                         num_refs += refs_to_add;
1260                         btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1261                 }
1262         }
1263         btrfs_mark_buffer_dirty(leaf);
1264         ret = 0;
1265 fail:
1266         btrfs_release_path(path);
1267         return ret;
1268 }
1269
1270 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1271                                            struct btrfs_root *root,
1272                                            struct btrfs_path *path,
1273                                            int refs_to_drop)
1274 {
1275         struct btrfs_key key;
1276         struct btrfs_extent_data_ref *ref1 = NULL;
1277         struct btrfs_shared_data_ref *ref2 = NULL;
1278         struct extent_buffer *leaf;
1279         u32 num_refs = 0;
1280         int ret = 0;
1281
1282         leaf = path->nodes[0];
1283         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1284
1285         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1286                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1287                                       struct btrfs_extent_data_ref);
1288                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1289         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1290                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1291                                       struct btrfs_shared_data_ref);
1292                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1293 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1294         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1295                 struct btrfs_extent_ref_v0 *ref0;
1296                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1297                                       struct btrfs_extent_ref_v0);
1298                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1299 #endif
1300         } else {
1301                 BUG();
1302         }
1303
1304         BUG_ON(num_refs < refs_to_drop);
1305         num_refs -= refs_to_drop;
1306
1307         if (num_refs == 0) {
1308                 ret = btrfs_del_item(trans, root, path);
1309         } else {
1310                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1311                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1312                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1313                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1314 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1315                 else {
1316                         struct btrfs_extent_ref_v0 *ref0;
1317                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1318                                         struct btrfs_extent_ref_v0);
1319                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1320                 }
1321 #endif
1322                 btrfs_mark_buffer_dirty(leaf);
1323         }
1324         return ret;
1325 }
1326
1327 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1328                                           struct btrfs_path *path,
1329                                           struct btrfs_extent_inline_ref *iref)
1330 {
1331         struct btrfs_key key;
1332         struct extent_buffer *leaf;
1333         struct btrfs_extent_data_ref *ref1;
1334         struct btrfs_shared_data_ref *ref2;
1335         u32 num_refs = 0;
1336
1337         leaf = path->nodes[0];
1338         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1339         if (iref) {
1340                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1341                     BTRFS_EXTENT_DATA_REF_KEY) {
1342                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1343                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1344                 } else {
1345                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1346                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1347                 }
1348         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1349                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1350                                       struct btrfs_extent_data_ref);
1351                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1352         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1353                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1354                                       struct btrfs_shared_data_ref);
1355                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1356 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1357         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1358                 struct btrfs_extent_ref_v0 *ref0;
1359                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1360                                       struct btrfs_extent_ref_v0);
1361                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1362 #endif
1363         } else {
1364                 WARN_ON(1);
1365         }
1366         return num_refs;
1367 }
1368
1369 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1370                                           struct btrfs_root *root,
1371                                           struct btrfs_path *path,
1372                                           u64 bytenr, u64 parent,
1373                                           u64 root_objectid)
1374 {
1375         struct btrfs_key key;
1376         int ret;
1377
1378         key.objectid = bytenr;
1379         if (parent) {
1380                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1381                 key.offset = parent;
1382         } else {
1383                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1384                 key.offset = root_objectid;
1385         }
1386
1387         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1388         if (ret > 0)
1389                 ret = -ENOENT;
1390 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1391         if (ret == -ENOENT && parent) {
1392                 btrfs_release_path(path);
1393                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1394                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1395                 if (ret > 0)
1396                         ret = -ENOENT;
1397         }
1398 #endif
1399         return ret;
1400 }
1401
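/*
 * Tree block back refs carry no item body: everything is encoded in the
 * key, either (bytenr, SHARED_BLOCK_REF, parent) or
 * (bytenr, TREE_BLOCK_REF, root_objectid), which is why the item below
 * is inserted with a size of zero.
 */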
1402 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1403                                           struct btrfs_root *root,
1404                                           struct btrfs_path *path,
1405                                           u64 bytenr, u64 parent,
1406                                           u64 root_objectid)
1407 {
1408         struct btrfs_key key;
1409         int ret;
1410
1411         key.objectid = bytenr;
1412         if (parent) {
1413                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1414                 key.offset = parent;
1415         } else {
1416                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1417                 key.offset = root_objectid;
1418         }
1419
1420         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1421         btrfs_release_path(path);
1422         return ret;
1423 }
1424
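/*
 * Pick the back ref key type from (parent, owner): metadata owners
 * (below BTRFS_FIRST_FREE_OBJECTID) take the tree block variants, data
 * owners take the data variants, and a nonzero parent selects the
 * shared flavour.
 */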
1425 static inline int extent_ref_type(u64 parent, u64 owner)
1426 {
1427         int type;
1428         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1429                 if (parent > 0)
1430                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1431                 else
1432                         type = BTRFS_TREE_BLOCK_REF_KEY;
1433         } else {
1434                 if (parent > 0)
1435                         type = BTRFS_SHARED_DATA_REF_KEY;
1436                 else
1437                         type = BTRFS_EXTENT_DATA_REF_KEY;
1438         }
1439         return type;
1440 }
1441
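/*
 * Walk up the path and return the first key that follows the current
 * slot, checking each level for a next entry.  Returns 0 if such a key
 * exists and 1 if we ran off the end of the tree.
 */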
1442 static int find_next_key(struct btrfs_path *path, int level,
1443                          struct btrfs_key *key)
{
1446         for (; level < BTRFS_MAX_LEVEL; level++) {
1447                 if (!path->nodes[level])
1448                         break;
1449                 if (path->slots[level] + 1 >=
1450                     btrfs_header_nritems(path->nodes[level]))
1451                         continue;
1452                 if (level == 0)
1453                         btrfs_item_key_to_cpu(path->nodes[level], key,
1454                                               path->slots[level] + 1);
1455                 else
1456                         btrfs_node_key_to_cpu(path->nodes[level], key,
1457                                               path->slots[level] + 1);
1458                 return 0;
1459         }
1460         return 1;
1461 }
1462
/*
 * look for an inline back ref. if the back ref is found, *ref_ret is set
 * to the address of the inline back ref, and 0 is returned.
 *
 * if the back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *       items in the tree are ordered.
 */
1476 static noinline_for_stack
1477 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1478                                  struct btrfs_root *root,
1479                                  struct btrfs_path *path,
1480                                  struct btrfs_extent_inline_ref **ref_ret,
1481                                  u64 bytenr, u64 num_bytes,
1482                                  u64 parent, u64 root_objectid,
1483                                  u64 owner, u64 offset, int insert)
1484 {
1485         struct btrfs_key key;
1486         struct extent_buffer *leaf;
1487         struct btrfs_extent_item *ei;
1488         struct btrfs_extent_inline_ref *iref;
1489         u64 flags;
1490         u64 item_size;
1491         unsigned long ptr;
1492         unsigned long end;
1493         int extra_size;
1494         int type;
1495         int want;
1496         int ret;
1497         int err = 0;
1498         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1499                                                  SKINNY_METADATA);
1500
1501         key.objectid = bytenr;
1502         key.type = BTRFS_EXTENT_ITEM_KEY;
1503         key.offset = num_bytes;
1504
1505         want = extent_ref_type(parent, owner);
1506         if (insert) {
1507                 extra_size = btrfs_extent_inline_ref_size(want);
1508                 path->keep_locks = 1;
1509         } else
1510                 extra_size = -1;
1511
        /*
         * For tree blocks, owner is the level of the block, which is
         * exactly what a skinny METADATA_ITEM key stores in its offset.
         */
1516         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1517                 key.type = BTRFS_METADATA_ITEM_KEY;
1518                 key.offset = owner;
1519         }
1520
1521 again:
1522         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1523         if (ret < 0) {
1524                 err = ret;
1525                 goto out;
1526         }
1527
1528         /*
1529          * We may be a newly converted file system which still has the old fat
1530          * extent entries for metadata, so try and see if we have one of those.
1531          */
1532         if (ret > 0 && skinny_metadata) {
1533                 skinny_metadata = false;
1534                 if (path->slots[0]) {
1535                         path->slots[0]--;
1536                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1537                                               path->slots[0]);
1538                         if (key.objectid == bytenr &&
1539                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1540                             key.offset == num_bytes)
1541                                 ret = 0;
1542                 }
1543                 if (ret) {
1544                         key.objectid = bytenr;
1545                         key.type = BTRFS_EXTENT_ITEM_KEY;
1546                         key.offset = num_bytes;
1547                         btrfs_release_path(path);
1548                         goto again;
1549                 }
1550         }
1551
1552         if (ret && !insert) {
1553                 err = -ENOENT;
1554                 goto out;
1555         } else if (WARN_ON(ret)) {
1556                 err = -EIO;
1557                 goto out;
1558         }
1559
1560         leaf = path->nodes[0];
1561         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1562 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1563         if (item_size < sizeof(*ei)) {
1564                 if (!insert) {
1565                         err = -ENOENT;
1566                         goto out;
1567                 }
1568                 ret = convert_extent_item_v0(trans, root, path, owner,
1569                                              extra_size);
1570                 if (ret < 0) {
1571                         err = ret;
1572                         goto out;
1573                 }
1574                 leaf = path->nodes[0];
1575                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1576         }
1577 #endif
1578         BUG_ON(item_size < sizeof(*ei));
1579
1580         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1581         flags = btrfs_extent_flags(leaf, ei);
1582
1583         ptr = (unsigned long)(ei + 1);
1584         end = (unsigned long)ei + item_size;
1585
1586         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1587                 ptr += sizeof(struct btrfs_tree_block_info);
1588                 BUG_ON(ptr > end);
1589         }
1590
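        /*
         * Inline refs are sorted by type, and within a type by their
         * offset or hash, so the scan below can stop as soon as it
         * walks past the position where the ref we want would have
         * to be.
         */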
1591         err = -ENOENT;
1592         while (1) {
1593                 if (ptr >= end) {
1594                         WARN_ON(ptr > end);
1595                         break;
1596                 }
1597                 iref = (struct btrfs_extent_inline_ref *)ptr;
1598                 type = btrfs_extent_inline_ref_type(leaf, iref);
1599                 if (want < type)
1600                         break;
1601                 if (want > type) {
1602                         ptr += btrfs_extent_inline_ref_size(type);
1603                         continue;
1604                 }
1605
1606                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1607                         struct btrfs_extent_data_ref *dref;
1608                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1609                         if (match_extent_data_ref(leaf, dref, root_objectid,
1610                                                   owner, offset)) {
1611                                 err = 0;
1612                                 break;
1613                         }
1614                         if (hash_extent_data_ref_item(leaf, dref) <
1615                             hash_extent_data_ref(root_objectid, owner, offset))
1616                                 break;
1617                 } else {
1618                         u64 ref_offset;
1619                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1620                         if (parent > 0) {
1621                                 if (parent == ref_offset) {
1622                                         err = 0;
1623                                         break;
1624                                 }
1625                                 if (ref_offset < parent)
1626                                         break;
1627                         } else {
1628                                 if (root_objectid == ref_offset) {
1629                                         err = 0;
1630                                         break;
1631                                 }
1632                                 if (ref_offset < root_objectid)
1633                                         break;
1634                         }
1635                 }
1636                 ptr += btrfs_extent_inline_ref_size(type);
1637         }
1638         if (err == -ENOENT && insert) {
1639                 if (item_size + extra_size >=
1640                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1641                         err = -EAGAIN;
1642                         goto out;
1643                 }
                /*
                 * To add a new inline back ref, we have to make sure
                 * there is no corresponding back ref item.
                 * For simplicity, we just do not add a new inline back
                 * ref if there is any kind of item for this block.
                 */
1650                 if (find_next_key(path, 0, &key) == 0 &&
1651                     key.objectid == bytenr &&
1652                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1653                         err = -EAGAIN;
1654                         goto out;
1655                 }
1656         }
1657         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1658 out:
1659         if (insert) {
1660                 path->keep_locks = 0;
1661                 btrfs_unlock_up_safe(path, 1);
1662         }
1663         return err;
1664 }
1665
1666 /*
1667  * helper to add new inline back ref
1668  */
1669 static noinline_for_stack
1670 void setup_inline_extent_backref(struct btrfs_root *root,
1671                                  struct btrfs_path *path,
1672                                  struct btrfs_extent_inline_ref *iref,
1673                                  u64 parent, u64 root_objectid,
1674                                  u64 owner, u64 offset, int refs_to_add,
1675                                  struct btrfs_delayed_extent_op *extent_op)
1676 {
1677         struct extent_buffer *leaf;
1678         struct btrfs_extent_item *ei;
1679         unsigned long ptr;
1680         unsigned long end;
1681         unsigned long item_offset;
1682         u64 refs;
1683         int size;
1684         int type;
1685
1686         leaf = path->nodes[0];
1687         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1688         item_offset = (unsigned long)iref - (unsigned long)ei;
1689
1690         type = extent_ref_type(parent, owner);
1691         size = btrfs_extent_inline_ref_size(type);
1692
1693         btrfs_extend_item(root, path, size);
1694
1695         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1696         refs = btrfs_extent_refs(leaf, ei);
1697         refs += refs_to_add;
1698         btrfs_set_extent_refs(leaf, ei, refs);
1699         if (extent_op)
1700                 __run_delayed_extent_op(extent_op, leaf, ei);
1701
1702         ptr = (unsigned long)ei + item_offset;
1703         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
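        /*
         * btrfs_extend_item() grew the item at its end; if the new ref
         * belongs before existing refs, shift them back to open a
         * size-byte gap at the insertion point.
         */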
1704         if (ptr < end - size)
1705                 memmove_extent_buffer(leaf, ptr + size, ptr,
1706                                       end - size - ptr);
1707
1708         iref = (struct btrfs_extent_inline_ref *)ptr;
1709         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1710         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1711                 struct btrfs_extent_data_ref *dref;
1712                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1713                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1714                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1715                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1716                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1717         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1718                 struct btrfs_shared_data_ref *sref;
1719                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1720                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1721                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1722         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1723                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1724         } else {
1725                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1726         }
1727         btrfs_mark_buffer_dirty(leaf);
1728 }
1729
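/*
 * Find a back ref for the given extent, trying the inline form inside
 * the extent item first and falling back to the keyed ref items.
 */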
1730 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1731                                  struct btrfs_root *root,
1732                                  struct btrfs_path *path,
1733                                  struct btrfs_extent_inline_ref **ref_ret,
1734                                  u64 bytenr, u64 num_bytes, u64 parent,
1735                                  u64 root_objectid, u64 owner, u64 offset)
1736 {
1737         int ret;
1738
1739         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1740                                            bytenr, num_bytes, parent,
1741                                            root_objectid, owner, offset, 0);
1742         if (ret != -ENOENT)
1743                 return ret;
1744
1745         btrfs_release_path(path);
1746         *ref_ret = NULL;
1747
1748         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1749                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1750                                             root_objectid);
1751         } else {
1752                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1753                                              root_objectid, owner, offset);
1754         }
1755         return ret;
1756 }
1757
1758 /*
1759  * helper to update/remove inline back ref
1760  */
1761 static noinline_for_stack
1762 void update_inline_extent_backref(struct btrfs_root *root,
1763                                   struct btrfs_path *path,
1764                                   struct btrfs_extent_inline_ref *iref,
1765                                   int refs_to_mod,
1766                                   struct btrfs_delayed_extent_op *extent_op)
1767 {
1768         struct extent_buffer *leaf;
1769         struct btrfs_extent_item *ei;
1770         struct btrfs_extent_data_ref *dref = NULL;
1771         struct btrfs_shared_data_ref *sref = NULL;
1772         unsigned long ptr;
1773         unsigned long end;
1774         u32 item_size;
1775         int size;
1776         int type;
1777         u64 refs;
1778
1779         leaf = path->nodes[0];
1780         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1781         refs = btrfs_extent_refs(leaf, ei);
1782         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1783         refs += refs_to_mod;
1784         btrfs_set_extent_refs(leaf, ei, refs);
1785         if (extent_op)
1786                 __run_delayed_extent_op(extent_op, leaf, ei);
1787
1788         type = btrfs_extent_inline_ref_type(leaf, iref);
1789
1790         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1791                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1792                 refs = btrfs_extent_data_ref_count(leaf, dref);
1793         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1794                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1795                 refs = btrfs_shared_data_ref_count(leaf, sref);
1796         } else {
1797                 refs = 1;
1798                 BUG_ON(refs_to_mod != -1);
1799         }
1800
1801         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1802         refs += refs_to_mod;
1803
1804         if (refs > 0) {
1805                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1806                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1807                 else
1808                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1809         } else {
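                /*
                 * The last reference of this type is gone: slide any
                 * refs that follow ours over the hole and shrink the
                 * extent item.
                 */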
                size = btrfs_extent_inline_ref_size(type);
1811                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1812                 ptr = (unsigned long)iref;
1813                 end = (unsigned long)ei + item_size;
1814                 if (ptr + size < end)
1815                         memmove_extent_buffer(leaf, ptr, ptr + size,
1816                                               end - ptr - size);
1817                 item_size -= size;
1818                 btrfs_truncate_item(root, path, item_size, 1);
1819         }
1820         btrfs_mark_buffer_dirty(leaf);
1821 }
1822
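/*
 * Add refs_to_add references as an inline back ref: bump the count if a
 * matching inline ref already exists, otherwise create one in place.
 * -EAGAIN from the lookup (too many inline refs) is passed through so
 * the caller can fall back to a keyed ref item.
 */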
1823 static noinline_for_stack
1824 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1825                                  struct btrfs_root *root,
1826                                  struct btrfs_path *path,
1827                                  u64 bytenr, u64 num_bytes, u64 parent,
1828                                  u64 root_objectid, u64 owner,
1829                                  u64 offset, int refs_to_add,
1830                                  struct btrfs_delayed_extent_op *extent_op)
1831 {
1832         struct btrfs_extent_inline_ref *iref;
1833         int ret;
1834
1835         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1836                                            bytenr, num_bytes, parent,
1837                                            root_objectid, owner, offset, 1);
1838         if (ret == 0) {
1839                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1840                 update_inline_extent_backref(root, path, iref,
1841                                              refs_to_add, extent_op);
1842         } else if (ret == -ENOENT) {
1843                 setup_inline_extent_backref(root, path, iref, parent,
1844                                             root_objectid, owner, offset,
1845                                             refs_to_add, extent_op);
1846                 ret = 0;
1847         }
1848         return ret;
1849 }
1850
1851 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1852                                  struct btrfs_root *root,
1853                                  struct btrfs_path *path,
1854                                  u64 bytenr, u64 parent, u64 root_objectid,
1855                                  u64 owner, u64 offset, int refs_to_add)
1856 {
1857         int ret;
1858         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1859                 BUG_ON(refs_to_add != 1);
1860                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1861                                             parent, root_objectid);
1862         } else {
1863                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1864                                              parent, root_objectid,
1865                                              owner, offset, refs_to_add);
1866         }
1867         return ret;
1868 }
1869
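/*
 * Drop refs_to_drop references, dispatching on how the back ref is
 * stored: inline in the extent item, a keyed data ref item, or a keyed
 * tree block ref item (which always holds exactly one ref).
 */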
1870 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1871                                  struct btrfs_root *root,
1872                                  struct btrfs_path *path,
1873                                  struct btrfs_extent_inline_ref *iref,
1874                                  int refs_to_drop, int is_data)
1875 {
1876         int ret = 0;
1877
1878         BUG_ON(!is_data && refs_to_drop != 1);
1879         if (iref) {
1880                 update_inline_extent_backref(root, path, iref,
1881                                              -refs_to_drop, NULL);
1882         } else if (is_data) {
1883                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1884         } else {
1885                 ret = btrfs_del_item(trans, root, path);
1886         }
1887         return ret;
1888 }
1889
1890 static int btrfs_issue_discard(struct block_device *bdev,
1891                                 u64 start, u64 len)
1892 {
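        /* blkdev_issue_discard() works in 512-byte sectors, hence the >> 9 */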
1893         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1894 }
1895
1896 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1897                                 u64 num_bytes, u64 *actual_bytes)
1898 {
1899         int ret;
1900         u64 discarded_bytes = 0;
        struct btrfs_bio *bbio = NULL;

1904         /* Tell the block device(s) that the sectors can be discarded */
1905         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1906                               bytenr, &num_bytes, &bbio, 0);
1907         /* Error condition is -ENOMEM */
1908         if (!ret) {
1909                 struct btrfs_bio_stripe *stripe = bbio->stripes;
                int i;

1913                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1914                         if (!stripe->dev->can_discard)
1915                                 continue;
1916
1917                         ret = btrfs_issue_discard(stripe->dev->bdev,
1918                                                   stripe->physical,
1919                                                   stripe->length);
1920                         if (!ret)
1921                                 discarded_bytes += stripe->length;
1922                         else if (ret != -EOPNOTSUPP)
                                break; /* Logic errors or -ENOMEM; -EIO should not be possible here */
1924
                        /*
                         * In case we get back EOPNOTSUPP for some reason,
                         * ignore the return value so we don't screw up
                         * people calling discard_extent.
                         */
1930                         ret = 0;
1931                 }
1932                 kfree(bbio);
1933         }
1934
1935         if (actual_bytes)
                *actual_bytes = discarded_bytes;

1939         if (ret == -EOPNOTSUPP)
1940                 ret = 0;
1941         return ret;
1942 }
1943
1944 /* Can return -ENOMEM */
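/*
 * This only queues a delayed ref; the extent tree itself is updated
 * later, when the delayed refs are run.
 */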
1945 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1946                          struct btrfs_root *root,
1947                          u64 bytenr, u64 num_bytes, u64 parent,
1948                          u64 root_objectid, u64 owner, u64 offset, int for_cow)
1949 {
1950         int ret;
1951         struct btrfs_fs_info *fs_info = root->fs_info;
1952
1953         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1954                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1955
1956         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1957                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1958                                         num_bytes,
1959                                         parent, root_objectid, (int)owner,
1960                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1961         } else {
1962                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1963                                         num_bytes,
1964                                         parent, root_objectid, owner, offset,
1965                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1966         }
1967         return ret;
1968 }
1969
1970 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1971                                   struct btrfs_root *root,
1972                                   u64 bytenr, u64 num_bytes,
1973                                   u64 parent, u64 root_objectid,
1974                                   u64 owner, u64 offset, int refs_to_add,
1975                                   struct btrfs_delayed_extent_op *extent_op)
1976 {
1977         struct btrfs_path *path;
1978         struct extent_buffer *leaf;
1979         struct btrfs_extent_item *item;
1980         u64 refs;
1981         int ret;
1982
1983         path = btrfs_alloc_path();
1984         if (!path)
1985                 return -ENOMEM;
1986
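        /*
         * Try the inline back ref first; -EAGAIN means the extent item
         * has no room left inline, so fall through and insert a keyed
         * back ref item instead.
         */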
1987         path->reada = 1;
1988         path->leave_spinning = 1;
        /* this will set up the path even if it fails to insert the back ref */
1990         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1991                                            path, bytenr, num_bytes, parent,
1992                                            root_objectid, owner, offset,
1993                                            refs_to_add, extent_op);
1994         if (ret != -EAGAIN)
1995                 goto out;
1996
1997         leaf = path->nodes[0];
1998         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1999         refs = btrfs_extent_refs(leaf, item);
2000         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2001         if (extent_op)
2002                 __run_delayed_extent_op(extent_op, leaf, item);
2003
2004         btrfs_mark_buffer_dirty(leaf);
2005         btrfs_release_path(path);
2006
2007         path->reada = 1;
2008         path->leave_spinning = 1;
2009
2010         /* now insert the actual backref */
2011         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2012                                     path, bytenr, parent, root_objectid,
2013                                     owner, offset, refs_to_add);
2014         if (ret)
2015                 btrfs_abort_transaction(trans, root, ret);
2016 out:
2017         btrfs_free_path(path);
2018         return ret;
2019 }
2020
2021 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2022                                 struct btrfs_root *root,
2023                                 struct btrfs_delayed_ref_node *node,
2024                                 struct btrfs_delayed_extent_op *extent_op,
2025                                 int insert_reserved)
2026 {
2027         int ret = 0;
2028         struct btrfs_delayed_data_ref *ref;
2029         struct btrfs_key ins;
2030         u64 parent = 0;
2031         u64 ref_root = 0;
2032         u64 flags = 0;
2033
2034         ins.objectid = node->bytenr;
2035         ins.offset = node->num_bytes;
2036         ins.type = BTRFS_EXTENT_ITEM_KEY;
2037
2038         ref = btrfs_delayed_node_to_data_ref(node);
2039         trace_run_delayed_data_ref(node, ref, node->action);
2040
2041         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2042                 parent = ref->parent;
2043         else
2044                 ref_root = ref->root;
2045
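        /*
         * A freshly allocated extent (insert_reserved) still needs its
         * extent item created; otherwise we only add or drop a
         * reference on an extent item that already exists.
         */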
2046         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2047                 if (extent_op)
2048                         flags |= extent_op->flags_to_set;
2049                 ret = alloc_reserved_file_extent(trans, root,
2050                                                  parent, ref_root, flags,
2051                                                  ref->objectid, ref->offset,
2052                                                  &ins, node->ref_mod);
2053         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2054                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2055                                              node->num_bytes, parent,
2056                                              ref_root, ref->objectid,
2057                                              ref->offset, node->ref_mod,
2058                                              extent_op);
2059         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2060                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2061                                           node->num_bytes, parent,
2062                                           ref_root, ref->objectid,
2063                                           ref->offset, node->ref_mod,
2064                                           extent_op);
2065         } else {
2066                 BUG();
2067         }
2068         return ret;
2069 }
2070
2071 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2072                                     struct extent_buffer *leaf,
2073                                     struct btrfs_extent_item *ei)
2074 {
2075         u64 flags = btrfs_extent_flags(leaf, ei);
2076         if (extent_op->update_flags) {
2077                 flags |= extent_op->flags_to_set;
2078                 btrfs_set_extent_flags(leaf, ei, flags);
2079         }
2080
2081         if (extent_op->update_key) {
2082                 struct btrfs_tree_block_info *bi;
2083                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2084                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2085                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2086         }
2087 }
2088
2089 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2090                                  struct btrfs_root *root,
2091                                  struct btrfs_delayed_ref_node *node,
2092                                  struct btrfs_delayed_extent_op *extent_op)
2093 {
2094         struct btrfs_key key;
2095         struct btrfs_path *path;
2096         struct btrfs_extent_item *ei;
2097         struct extent_buffer *leaf;
2098         u32 item_size;
2099         int ret;
2100         int err = 0;
2101         int metadata = !extent_op->is_data;
2102
2103         if (trans->aborted)
2104                 return 0;
2105
2106         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2107                 metadata = 0;
2108
2109         path = btrfs_alloc_path();
2110         if (!path)
2111                 return -ENOMEM;
2112
2113         key.objectid = node->bytenr;
2114
2115         if (metadata) {
2116                 key.type = BTRFS_METADATA_ITEM_KEY;
2117                 key.offset = extent_op->level;
2118         } else {
2119                 key.type = BTRFS_EXTENT_ITEM_KEY;
2120                 key.offset = node->num_bytes;
2121         }
2122
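        /*
         * On filesystems converted from the old format a tree block may
         * still be described by a fat EXTENT_ITEM; if the skinny
         * METADATA_ITEM search below misses, we retry with metadata == 0.
         */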
2123 again:
2124         path->reada = 1;
2125         path->leave_spinning = 1;
2126         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2127                                 path, 0, 1);
2128         if (ret < 0) {
2129                 err = ret;
2130                 goto out;
2131         }
2132         if (ret > 0) {
2133                 if (metadata) {
2134                         if (path->slots[0] > 0) {
2135                                 path->slots[0]--;
2136                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2137                                                       path->slots[0]);
2138                                 if (key.objectid == node->bytenr &&
2139                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2140                                     key.offset == node->num_bytes)
2141                                         ret = 0;
2142                         }
2143                         if (ret > 0) {
2144                                 btrfs_release_path(path);
2145                                 metadata = 0;
2146
2147                                 key.objectid = node->bytenr;
2148                                 key.offset = node->num_bytes;
2149                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2150                                 goto again;
2151                         }
2152                 } else {
2153                         err = -EIO;
2154                         goto out;
2155                 }
2156         }
2157
2158         leaf = path->nodes[0];
2159         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2160 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2161         if (item_size < sizeof(*ei)) {
2162                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2163                                              path, (u64)-1, 0);
2164                 if (ret < 0) {
2165                         err = ret;
2166                         goto out;
2167                 }
2168                 leaf = path->nodes[0];
2169                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2170         }
2171 #endif
2172         BUG_ON(item_size < sizeof(*ei));
2173         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2174         __run_delayed_extent_op(extent_op, leaf, ei);
2175
2176         btrfs_mark_buffer_dirty(leaf);
2177 out:
2178         btrfs_free_path(path);
2179         return err;
2180 }
2181
2182 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2183                                 struct btrfs_root *root,
2184                                 struct btrfs_delayed_ref_node *node,
2185                                 struct btrfs_delayed_extent_op *extent_op,
2186                                 int insert_reserved)
2187 {
2188         int ret = 0;
2189         struct btrfs_delayed_tree_ref *ref;
2190         struct btrfs_key ins;
2191         u64 parent = 0;
2192         u64 ref_root = 0;
2193         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2194                                                  SKINNY_METADATA);
2195
2196         ref = btrfs_delayed_node_to_tree_ref(node);
2197         trace_run_delayed_tree_ref(node, ref, node->action);
2198
2199         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2200                 parent = ref->parent;
2201         else
2202                 ref_root = ref->root;
2203
2204         ins.objectid = node->bytenr;
2205         if (skinny_metadata) {
2206                 ins.offset = ref->level;
2207                 ins.type = BTRFS_METADATA_ITEM_KEY;
2208         } else {
2209                 ins.offset = node->num_bytes;
2210                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2211         }
2212
2213         BUG_ON(node->ref_mod != 1);
2214         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2215                 BUG_ON(!extent_op || !extent_op->update_flags);
2216                 ret = alloc_reserved_tree_block(trans, root,
2217                                                 parent, ref_root,
2218                                                 extent_op->flags_to_set,
2219                                                 &extent_op->key,
2220                                                 ref->level, &ins);
2221         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2222                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2223                                              node->num_bytes, parent, ref_root,
2224                                              ref->level, 0, 1, extent_op);
2225         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2226                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2227                                           node->num_bytes, parent, ref_root,
2228                                           ref->level, 0, 1, extent_op);
2229         } else {
2230                 BUG();
2231         }
2232         return ret;
2233 }
2234
2235 /* helper function to actually process a single delayed ref entry */
2236 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2237                                struct btrfs_root *root,
2238                                struct btrfs_delayed_ref_node *node,
2239                                struct btrfs_delayed_extent_op *extent_op,
2240                                int insert_reserved)
2241 {
2242         int ret = 0;
2243
2244         if (trans->aborted) {
2245                 if (insert_reserved)
2246                         btrfs_pin_extent(root, node->bytenr,
2247                                          node->num_bytes, 1);
2248                 return 0;
2249         }
2250
2251         if (btrfs_delayed_ref_is_head(node)) {
2252                 struct btrfs_delayed_ref_head *head;
                /*
                 * we've hit the end of the chain and we were supposed
                 * to insert this extent into the tree.  But it got
                 * deleted before we ever needed to insert it, so all
                 * we have to do is clean up the accounting.
                 */
2259                 BUG_ON(extent_op);
2260                 head = btrfs_delayed_node_to_head(node);
2261                 trace_run_delayed_ref_head(node, head, node->action);
2262
2263                 if (insert_reserved) {
2264                         btrfs_pin_extent(root, node->bytenr,
2265                                          node->num_bytes, 1);
2266                         if (head->is_data) {
2267                                 ret = btrfs_del_csums(trans, root,
2268                                                       node->bytenr,
2269                                                       node->num_bytes);
2270                         }
2271                 }
2272                 return ret;
2273         }
2274
2275         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2276             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2277                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2278                                            insert_reserved);
2279         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2280                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2281                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2282                                            insert_reserved);
2283         else
2284                 BUG();
2285         return ret;
2286 }
2287
2288 static noinline struct btrfs_delayed_ref_node *
2289 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2290 {
2291         struct rb_node *node;
        struct btrfs_delayed_ref_node *ref, *last = NULL;
2293
        /*
         * select delayed refs of type BTRFS_ADD_DELAYED_REF first.
         * this prevents the ref count from going down to zero while
         * there are still pending delayed refs.
         */
2299         node = rb_first(&head->ref_root);
2300         while (node) {
2301                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2302                                 rb_node);
2303                 if (ref->action == BTRFS_ADD_DELAYED_REF)
2304                         return ref;
2305                 else if (last == NULL)
2306                         last = ref;
2307                 node = rb_next(node);
2308         }
2309         return last;
2310 }
2311
2312 /*
2313  * Returns 0 on success or if called with an already aborted transaction.
2314  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2315  */
2316 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2317                                              struct btrfs_root *root,
2318                                              unsigned long nr)
2319 {
2320         struct btrfs_delayed_ref_root *delayed_refs;
2321         struct btrfs_delayed_ref_node *ref;
2322         struct btrfs_delayed_ref_head *locked_ref = NULL;
2323         struct btrfs_delayed_extent_op *extent_op;
2324         struct btrfs_fs_info *fs_info = root->fs_info;
2325         ktime_t start = ktime_get();
2326         int ret;
2327         unsigned long count = 0;
2328         unsigned long actual_count = 0;
2329         int must_insert_reserved = 0;
2330
2331         delayed_refs = &trans->transaction->delayed_refs;
2332         while (1) {
2333                 if (!locked_ref) {
2334                         if (count >= nr)
2335                                 break;
2336
2337                         spin_lock(&delayed_refs->lock);
2338                         locked_ref = btrfs_select_ref_head(trans);
2339                         if (!locked_ref) {
2340                                 spin_unlock(&delayed_refs->lock);
2341                                 break;
2342                         }
2343
2344                         /* grab the lock that says we are going to process
2345                          * all the refs for this head */
2346                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2347                         spin_unlock(&delayed_refs->lock);
2348                         /*
2349                          * we may have dropped the spin lock to get the head
2350                          * mutex lock, and that might have given someone else
2351                          * time to free the head.  If that's true, it has been
2352                          * removed from our list and we can move on.
2353                          */
2354                         if (ret == -EAGAIN) {
2355                                 locked_ref = NULL;
2356                                 count++;
2357                                 continue;
2358                         }
2359                 }
2360
2361                 /*
2362                  * We need to try and merge add/drops of the same ref since we
2363                  * can run into issues with relocate dropping the implicit ref
2364                  * and then it being added back again before the drop can
2365                  * finish.  If we merged anything we need to re-loop so we can
2366                  * get a good ref.
2367                  */
2368                 spin_lock(&locked_ref->lock);
2369                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2370                                          locked_ref);
2371
2372                 /*
2373                  * locked_ref is the head node, so we have to go one
2374                  * node back for any delayed ref updates
2375                  */
2376                 ref = select_delayed_ref(locked_ref);
2377
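                /*
                 * A ref whose seq is still protected by a tree mod log
                 * user must not be run yet; put the head back and move
                 * on.
                 */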
2378                 if (ref && ref->seq &&
2379                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2380                         spin_unlock(&locked_ref->lock);
2381                         btrfs_delayed_ref_unlock(locked_ref);
2382                         spin_lock(&delayed_refs->lock);
2383                         locked_ref->processing = 0;
2384                         delayed_refs->num_heads_ready++;
2385                         spin_unlock(&delayed_refs->lock);
2386                         locked_ref = NULL;
2387                         cond_resched();
2388                         count++;
2389                         continue;
2390                 }
2391
2392                 /*
2393                  * record the must insert reserved flag before we
2394                  * drop the spin lock.
2395                  */
2396                 must_insert_reserved = locked_ref->must_insert_reserved;
2397                 locked_ref->must_insert_reserved = 0;
2398
2399                 extent_op = locked_ref->extent_op;
2400                 locked_ref->extent_op = NULL;
2401
2402                 if (!ref) {
                        /* All delayed refs have been processed, so go ahead
                         * and send the head node to run_one_delayed_ref
                         * so that any accounting fixes can happen.
                         */
2409                         ref = &locked_ref->node;
2410
2411                         if (extent_op && must_insert_reserved) {
2412                                 btrfs_free_delayed_extent_op(extent_op);
2413                                 extent_op = NULL;
2414                         }
2415
2416                         if (extent_op) {
2417                                 spin_unlock(&locked_ref->lock);
2418                                 ret = run_delayed_extent_op(trans, root,
2419                                                             ref, extent_op);
2420                                 btrfs_free_delayed_extent_op(extent_op);
2421
2422                                 if (ret) {
2423                                         /*
2424                                          * Need to reset must_insert_reserved if
2425                                          * there was an error so the abort stuff
2426                                          * can cleanup the reserved space
2427                                          * properly.
2428                                          */
2429                                         if (must_insert_reserved)
2430                                                 locked_ref->must_insert_reserved = 1;
2431                                         locked_ref->processing = 0;
2432                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2433                                         btrfs_delayed_ref_unlock(locked_ref);
2434                                         return ret;
2435                                 }
2436                                 continue;
2437                         }
2438
2439                         /*
                         * Need to drop our head ref lock and re-acquire the
2441                          * delayed ref lock and then re-check to make sure
2442                          * nobody got added.
2443                          */
2444                         spin_unlock(&locked_ref->lock);
2445                         spin_lock(&delayed_refs->lock);
2446                         spin_lock(&locked_ref->lock);
2447                         if (rb_first(&locked_ref->ref_root) ||
2448                             locked_ref->extent_op) {
2449                                 spin_unlock(&locked_ref->lock);
2450                                 spin_unlock(&delayed_refs->lock);
2451                                 continue;
2452                         }
2453                         ref->in_tree = 0;
2454                         delayed_refs->num_heads--;
2455                         rb_erase(&locked_ref->href_node,
2456                                  &delayed_refs->href_root);
2457                         spin_unlock(&delayed_refs->lock);
2458                 } else {
2459                         actual_count++;
2460                         ref->in_tree = 0;
2461                         rb_erase(&ref->rb_node, &locked_ref->ref_root);
2462                 }
2463                 atomic_dec(&delayed_refs->num_entries);
2464
2465                 if (!btrfs_delayed_ref_is_head(ref)) {
2466                         /*
2467                          * when we play the delayed ref, also correct the
2468                          * ref_mod on head
2469                          */
2470                         switch (ref->action) {
2471                         case BTRFS_ADD_DELAYED_REF:
2472                         case BTRFS_ADD_DELAYED_EXTENT:
2473                                 locked_ref->node.ref_mod -= ref->ref_mod;
2474                                 break;
2475                         case BTRFS_DROP_DELAYED_REF:
2476                                 locked_ref->node.ref_mod += ref->ref_mod;
2477                                 break;
2478                         default:
2479                                 WARN_ON(1);
2480                         }
2481                 }
2482                 spin_unlock(&locked_ref->lock);
2483
2484                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2485                                           must_insert_reserved);
2486
2487                 btrfs_free_delayed_extent_op(extent_op);
2488                 if (ret) {
2489                         locked_ref->processing = 0;
2490                         btrfs_delayed_ref_unlock(locked_ref);
2491                         btrfs_put_delayed_ref(ref);
2492                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2493                         return ret;
2494                 }
2495
2496                 /*
2497                  * If this node is a head, that means all the refs in this head
2498                  * have been dealt with, and we will pick the next head to deal
2499                  * with, so we must unlock the head and drop it from the cluster
2500                  * list before we release it.
2501                  */
2502                 if (btrfs_delayed_ref_is_head(ref)) {
2503                         btrfs_delayed_ref_unlock(locked_ref);
2504                         locked_ref = NULL;
2505                 }
2506                 btrfs_put_delayed_ref(ref);
2507                 count++;
2508                 cond_resched();
2509         }
2510
        /*
         * We don't want to include ref heads since we can have empty ref
         * heads, and those would drastically skew our runtime down: they
         * involve only accounting, no actual extent tree updates.
         */
2516         if (actual_count > 0) {
2517                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2518                 u64 avg;
2519
2520                 /*
2521                  * We weigh the current average higher than our current runtime
2522                  * to avoid large swings in the average.
2523                  */
2524                 spin_lock(&delayed_refs->lock);
2525                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2526                 avg = div64_u64(avg, 4);
2527                 fs_info->avg_delayed_ref_runtime = avg;
2528                 spin_unlock(&delayed_refs->lock);
2529         }
2530         return 0;
2531 }
2532
2533 #ifdef SCRAMBLE_DELAYED_REFS
2534 /*
2535  * Normally delayed refs get processed in ascending bytenr order. This
2536  * correlates in most cases to the order added. To expose dependencies on this
 * order, we start to process the tree in the middle instead of the beginning.
2538  */
2539 static u64 find_middle(struct rb_root *root)
2540 {
2541         struct rb_node *n = root->rb_node;
2542         struct btrfs_delayed_ref_node *entry;
2543         int alt = 1;
2544         u64 middle;
2545         u64 first = 0, last = 0;
2546
2547         n = rb_first(root);
2548         if (n) {
2549                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2550                 first = entry->bytenr;
2551         }
2552         n = rb_last(root);
2553         if (n) {
2554                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2555                 last = entry->bytenr;
2556         }
2557         n = root->rb_node;
2558
2559         while (n) {
2560                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2561                 WARN_ON(!entry->in_tree);
2562
2563                 middle = entry->bytenr;
2564
2565                 if (alt)
2566                         n = n->rb_left;
2567                 else
2568                         n = n->rb_right;
2569
2570                 alt = 1 - alt;
2571         }
2572         return middle;
2573 }
2574 #endif
2575
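/*
 * Flush the qgroup ref updates that were queued on this transaction
 * alongside the delayed refs, accounting each one and then releasing
 * our tree mod seq element.
 */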
2576 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
2577                                          struct btrfs_fs_info *fs_info)
2578 {
2579         struct qgroup_update *qgroup_update;
2580         int ret = 0;
2581
2582         if (list_empty(&trans->qgroup_ref_list) !=
2583             !trans->delayed_ref_elem.seq) {
2584                 /* list without seq or seq without list */
2585                 btrfs_err(fs_info,
2586                         "qgroup accounting update error, list is%s empty, seq is %#x.%x",
2587                         list_empty(&trans->qgroup_ref_list) ? "" : " not",
2588                         (u32)(trans->delayed_ref_elem.seq >> 32),
2589                         (u32)trans->delayed_ref_elem.seq);
2590                 BUG();
2591         }
2592
2593         if (!trans->delayed_ref_elem.seq)
2594                 return 0;
2595
2596         while (!list_empty(&trans->qgroup_ref_list)) {
2597                 qgroup_update = list_first_entry(&trans->qgroup_ref_list,
2598                                                  struct qgroup_update, list);
2599                 list_del(&qgroup_update->list);
2600                 if (!ret)
2601                         ret = btrfs_qgroup_account_ref(
2602                                         trans, fs_info, qgroup_update->node,
2603                                         qgroup_update->extent_op);
2604                 kfree(qgroup_update);
2605         }
2606
2607         btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
2608
2609         return ret;
2610 }
2611
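/*
 * Rough estimate of how many extent tree leaves the given number of
 * delayed ref heads could touch, used for reservation sizing.
 */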
2612 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2613 {
2614         u64 num_bytes;
2615
2616         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2617                              sizeof(struct btrfs_extent_inline_ref));
2618         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2619                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2620
2621         /*
2622          * We don't ever fill up leaves all the way so multiply by 2 just to be
         * closer to what we're really going to want to use.
2624          */
2625         return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2626 }
2627
2628 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2629                                        struct btrfs_root *root)
2630 {
2631         struct btrfs_block_rsv *global_rsv;
2632         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2633         u64 num_bytes;
2634         int ret = 0;
2635
2636         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2637         num_heads = heads_to_leaves(root, num_heads);
2638         if (num_heads > 1)
2639                 num_bytes += (num_heads - 1) * root->leafsize;
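        /* leaves are rarely full, so double the estimate (see heads_to_leaves()) */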
2640         num_bytes <<= 1;
2641         global_rsv = &root->fs_info->global_block_rsv;
2642
2643         /*
         * If we can't allocate any more chunks, let's make sure we have _lots_ of
2645          * wiggle room since running delayed refs can create more delayed refs.
2646          */
2647         if (global_rsv->space_info->full)
2648                 num_bytes <<= 1;
2649
2650         spin_lock(&global_rsv->lock);
2651         if (global_rsv->reserved <= num_bytes)
2652                 ret = 1;
2653         spin_unlock(&global_rsv->lock);
2654         return ret;
2655 }
2656
2657 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2658                                        struct btrfs_root *root)
2659 {
2660         struct btrfs_fs_info *fs_info = root->fs_info;
2661         u64 num_entries =
2662                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2663         u64 avg_runtime;
2664
2665         smp_mb();
2666         avg_runtime = fs_info->avg_delayed_ref_runtime;
2667         if (num_entries * avg_runtime >= NSEC_PER_SEC)
2668                 return 1;
2669
2670         return btrfs_check_space_for_delayed_refs(trans, root);
2671 }
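
/*
 * Editor's sketch (hypothetical caller, not a call site in this file):
 * the helper above is meant to be polled from transaction-ending paths,
 * which run a batch of delayed refs once the backlog would cost more
 * than about a second of average runtime:
 *
 *	if (btrfs_should_throttle_delayed_refs(trans, root))
 *		btrfs_run_delayed_refs(trans, root, batch);
 *
 * where batch is a caller-chosen count, e.g. derived from
 * delayed_refs.num_entries.
 */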
2672
2673 /*
2674  * this starts processing the delayed reference count updates and
2675  * extent insertions we have queued up so far.  count can be
2676  * 0, which means to process everything in the tree at the start
2677  * of the run (but not newly added entries), or it can be some target
2678  * number you'd like to process.
2679  *
2680  * Returns 0 on success or if called with an aborted transaction
2681  * Returns <0 on error and aborts the transaction
2682  */
2683 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2684                            struct btrfs_root *root, unsigned long count)
2685 {
2686         struct rb_node *node;
2687         struct btrfs_delayed_ref_root *delayed_refs;
2688         struct btrfs_delayed_ref_head *head;
2689         int ret;
2690         int run_all = count == (unsigned long)-1;
2691         int run_most = 0;
2692
2693         /* We'll clean this up in btrfs_cleanup_transaction */
2694         if (trans->aborted)
2695                 return 0;
2696
2697         if (root == root->fs_info->extent_root)
2698                 root = root->fs_info->tree_root;
2699
2700         btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
2701
2702         delayed_refs = &trans->transaction->delayed_refs;
2703         if (count == 0) {
2704                 count = atomic_read(&delayed_refs->num_entries) * 2;
2705                 run_most = 1;
2706         }
2707
2708 again:
2709 #ifdef SCRAMBLE_DELAYED_REFS
2710         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2711 #endif
2712         ret = __btrfs_run_delayed_refs(trans, root, count);
2713         if (ret < 0) {
2714                 btrfs_abort_transaction(trans, root, ret);
2715                 return ret;
2716         }
2717
2718         if (run_all) {
2719                 if (!list_empty(&trans->new_bgs))
2720                         btrfs_create_pending_block_groups(trans, root);
2721
2722                 spin_lock(&delayed_refs->lock);
2723                 node = rb_first(&delayed_refs->href_root);
2724                 if (!node) {
2725                         spin_unlock(&delayed_refs->lock);
2726                         goto out;
2727                 }
2728                 count = (unsigned long)-1;
2729
2730                 while (node) {
2731                         head = rb_entry(node, struct btrfs_delayed_ref_head,
2732                                         href_node);
2733                         if (btrfs_delayed_ref_is_head(&head->node)) {
2734                                 struct btrfs_delayed_ref_node *ref;
2735
2736                                 ref = &head->node;
2737                                 atomic_inc(&ref->refs);
2738
2739                                 spin_unlock(&delayed_refs->lock);
2740                                 /*
2741                                  * Mutex was contended, block until it's
2742                                  * released and try again
2743                                  */
2744                                 mutex_lock(&head->mutex);
2745                                 mutex_unlock(&head->mutex);
2746
2747                                 btrfs_put_delayed_ref(ref);
2748                                 cond_resched();
2749                                 goto again;
2750                         } else {
2751                                 WARN_ON(1);
2752                         }
2753                         node = rb_next(node);
2754                 }
2755                 spin_unlock(&delayed_refs->lock);
2756                 cond_resched();
2757                 goto again;
2758         }
2759 out:
2760         assert_qgroups_uptodate(trans);
2761         return 0;
2762 }
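
/*
 * Editor's note on the count argument, as handled above:
 *
 *	btrfs_run_delayed_refs(trans, root, 0);		// twice the current backlog
 *	btrfs_run_delayed_refs(trans, root, 32);	// best effort, ~32 entries
 *	btrfs_run_delayed_refs(trans, root, (unsigned long)-1);	// loop until empty
 *
 * The (unsigned long)-1 form is what btrfs_write_dirty_block_groups()
 * below uses before it walks the extent tree.
 */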
2763
2764 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2765                                 struct btrfs_root *root,
2766                                 u64 bytenr, u64 num_bytes, u64 flags,
2767                                 int level, int is_data)
2768 {
2769         struct btrfs_delayed_extent_op *extent_op;
2770         int ret;
2771
2772         extent_op = btrfs_alloc_delayed_extent_op();
2773         if (!extent_op)
2774                 return -ENOMEM;
2775
2776         extent_op->flags_to_set = flags;
2777         extent_op->update_flags = 1;
2778         extent_op->update_key = 0;
2779         extent_op->is_data = is_data ? 1 : 0;
2780         extent_op->level = level;
2781
2782         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2783                                           num_bytes, extent_op);
2784         if (ret)
2785                 btrfs_free_delayed_extent_op(extent_op);
2786         return ret;
2787 }
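
/*
 * Editor's sketch of a typical use of the helper above (hypothetical
 * call site, modeled on the COW path in ctree.c): flag a tree block as
 * a full backref without synchronously rewriting its extent item:
 *
 *	ret = btrfs_set_disk_extent_flags(trans, root, buf->start, buf->len,
 *					  BTRFS_BLOCK_FLAG_FULL_BACKREF,
 *					  btrfs_header_level(buf), 0);
 *
 * The update is queued as a delayed extent op and applied when the
 * delayed refs for that bytenr are run.
 */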
2788
2789 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2790                                       struct btrfs_root *root,
2791                                       struct btrfs_path *path,
2792                                       u64 objectid, u64 offset, u64 bytenr)
2793 {
2794         struct btrfs_delayed_ref_head *head;
2795         struct btrfs_delayed_ref_node *ref;
2796         struct btrfs_delayed_data_ref *data_ref;
2797         struct btrfs_delayed_ref_root *delayed_refs;
2798         struct rb_node *node;
2799         int ret = 0;
2800
2801         delayed_refs = &trans->transaction->delayed_refs;
2802         spin_lock(&delayed_refs->lock);
2803         head = btrfs_find_delayed_ref_head(trans, bytenr);
2804         if (!head) {
2805                 spin_unlock(&delayed_refs->lock);
2806                 return 0;
2807         }
2808
2809         if (!mutex_trylock(&head->mutex)) {
2810                 atomic_inc(&head->node.refs);
2811                 spin_unlock(&delayed_refs->lock);
2812
2813                 btrfs_release_path(path);
2814
2815                 /*
2816                  * Mutex was contended, block until it's released and let
2817                  * caller try again
2818                  */
2819                 mutex_lock(&head->mutex);
2820                 mutex_unlock(&head->mutex);
2821                 btrfs_put_delayed_ref(&head->node);
2822                 return -EAGAIN;
2823         }
2824         spin_unlock(&delayed_refs->lock);
2825
2826         spin_lock(&head->lock);
2827         node = rb_first(&head->ref_root);
2828         while (node) {
2829                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2830                 node = rb_next(node);
2831
2832                 /* If it's a shared ref we know a cross reference exists */
2833                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
2834                         ret = 1;
2835                         break;
2836                 }
2837
2838                 data_ref = btrfs_delayed_node_to_data_ref(ref);
2839
2840                 /*
2841                  * If our ref doesn't match the one we're currently looking at
2842                  * then we have a cross reference.
2843                  */
2844                 if (data_ref->root != root->root_key.objectid ||
2845                     data_ref->objectid != objectid ||
2846                     data_ref->offset != offset) {
2847                         ret = 1;
2848                         break;
2849                 }
2850         }
2851         spin_unlock(&head->lock);
2852         mutex_unlock(&head->mutex);
2853         return ret;
2854 }
2855
2856 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2857                                         struct btrfs_root *root,
2858                                         struct btrfs_path *path,
2859                                         u64 objectid, u64 offset, u64 bytenr)
2860 {
2861         struct btrfs_root *extent_root = root->fs_info->extent_root;
2862         struct extent_buffer *leaf;
2863         struct btrfs_extent_data_ref *ref;
2864         struct btrfs_extent_inline_ref *iref;
2865         struct btrfs_extent_item *ei;
2866         struct btrfs_key key;
2867         u32 item_size;
2868         int ret;
2869
2870         key.objectid = bytenr;
2871         key.offset = (u64)-1;
2872         key.type = BTRFS_EXTENT_ITEM_KEY;
2873
2874         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2875         if (ret < 0)
2876                 goto out;
2877         BUG_ON(ret == 0); /* Corruption */
2878
2879         ret = -ENOENT;
2880         if (path->slots[0] == 0)
2881                 goto out;
2882
2883         path->slots[0]--;
2884         leaf = path->nodes[0];
2885         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2886
2887         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2888                 goto out;
2889
2890         ret = 1;
2891         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2892 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2893         if (item_size < sizeof(*ei)) {
2894                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2895                 goto out;
2896         }
2897 #endif
2898         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2899
2900         if (item_size != sizeof(*ei) +
2901             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2902                 goto out;
2903
2904         if (btrfs_extent_generation(leaf, ei) <=
2905             btrfs_root_last_snapshot(&root->root_item))
2906                 goto out;
2907
2908         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2909         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2910             BTRFS_EXTENT_DATA_REF_KEY)
2911                 goto out;
2912
2913         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2914         if (btrfs_extent_refs(leaf, ei) !=
2915             btrfs_extent_data_ref_count(leaf, ref) ||
2916             btrfs_extent_data_ref_root(leaf, ref) !=
2917             root->root_key.objectid ||
2918             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2919             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2920                 goto out;
2921
2922         ret = 0;
2923 out:
2924         return ret;
2925 }
2926
2927 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2928                           struct btrfs_root *root,
2929                           u64 objectid, u64 offset, u64 bytenr)
2930 {
2931         struct btrfs_path *path;
2932         int ret;
2933         int ret2;
2934
2935         path = btrfs_alloc_path();
2936         if (!path)
2937                 return -ENOMEM;
2938
2939         do {
2940                 ret = check_committed_ref(trans, root, path, objectid,
2941                                           offset, bytenr);
2942                 if (ret && ret != -ENOENT)
2943                         goto out;
2944
2945                 ret2 = check_delayed_ref(trans, root, path, objectid,
2946                                          offset, bytenr);
2947         } while (ret2 == -EAGAIN);
2948
2949         if (ret2 && ret2 != -ENOENT) {
2950                 ret = ret2;
2951                 goto out;
2952         }
2953
2954         if (ret != -ENOENT || ret2 != -ENOENT)
2955                 ret = 0;
2956 out:
2957         btrfs_free_path(path);
2958         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2959                 WARN_ON(ret > 0);
2960         return ret;
2961 }
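
/*
 * Editor's sketch (hypothetical caller): the nocow write path is the
 * classic user of the check above; a file extent may only be rewritten
 * in place when no other root references it, roughly:
 *
 *	ret = btrfs_cross_ref_exist(trans, root, ino, file_offset,
 *				    disk_bytenr);
 *	if (ret)		// shared (>0), missing, or error (<0)
 *		goto cow;
 *
 * with ino, file_offset, disk_bytenr and the cow label all belonging
 * to the hypothetical caller.
 */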
2962
2963 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2964                            struct btrfs_root *root,
2965                            struct extent_buffer *buf,
2966                            int full_backref, int inc, int for_cow)
2967 {
2968         u64 bytenr;
2969         u64 num_bytes;
2970         u64 parent;
2971         u64 ref_root;
2972         u32 nritems;
2973         struct btrfs_key key;
2974         struct btrfs_file_extent_item *fi;
2975         int i;
2976         int level;
2977         int ret = 0;
2978         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2979                             u64, u64, u64, u64, u64, u64, int);
2980
2981         ref_root = btrfs_header_owner(buf);
2982         nritems = btrfs_header_nritems(buf);
2983         level = btrfs_header_level(buf);
2984
2985         if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
2986                 return 0;
2987
2988         if (inc)
2989                 process_func = btrfs_inc_extent_ref;
2990         else
2991                 process_func = btrfs_free_extent;
2992
2993         if (full_backref)
2994                 parent = buf->start;
2995         else
2996                 parent = 0;
2997
2998         for (i = 0; i < nritems; i++) {
2999                 if (level == 0) {
3000                         btrfs_item_key_to_cpu(buf, &key, i);
3001                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
3002                                 continue;
3003                         fi = btrfs_item_ptr(buf, i,
3004                                             struct btrfs_file_extent_item);
3005                         if (btrfs_file_extent_type(buf, fi) ==
3006                             BTRFS_FILE_EXTENT_INLINE)
3007                                 continue;
3008                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3009                         if (bytenr == 0)
3010                                 continue;
3011
3012                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3013                         key.offset -= btrfs_file_extent_offset(buf, fi);
3014                         ret = process_func(trans, root, bytenr, num_bytes,
3015                                            parent, ref_root, key.objectid,
3016                                            key.offset, for_cow);
3017                         if (ret)
3018                                 goto fail;
3019                 } else {
3020                         bytenr = btrfs_node_blockptr(buf, i);
3021                         num_bytes = btrfs_level_size(root, level - 1);
3022                         ret = process_func(trans, root, bytenr, num_bytes,
3023                                            parent, ref_root, level - 1, 0,
3024                                            for_cow);
3025                         if (ret)
3026                                 goto fail;
3027                 }
3028         }
3029         return 0;
3030 fail:
3031         return ret;
3032 }
3033
3034 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3035                   struct extent_buffer *buf, int full_backref, int for_cow)
3036 {
3037         return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
3038 }
3039
3040 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3041                   struct extent_buffer *buf, int full_backref, int for_cow)
3042 {
3043         return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
3044 }
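
/*
 * Editor's note: btrfs_inc_ref()/btrfs_dec_ref() are the COW-time
 * bookkeeping pair; when a shared buffer is copied, the new copy takes
 * references on everything it points at and the old copy may drop
 * them.  A hedged sketch of the pattern (see the COW code in ctree.c
 * for the real call sites):
 *
 *	ret = btrfs_inc_ref(trans, root, cow, 1, 1);
 *	if (!ret)
 *		ret = btrfs_dec_ref(trans, root, buf, 1, 1);
 */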
3045
3046 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3047                                  struct btrfs_root *root,
3048                                  struct btrfs_path *path,
3049                                  struct btrfs_block_group_cache *cache)
3050 {
3051         int ret;
3052         struct btrfs_root *extent_root = root->fs_info->extent_root;
3053         unsigned long bi;
3054         struct extent_buffer *leaf;
3055
3056         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3057         if (ret < 0)
3058                 goto fail;
3059         BUG_ON(ret); /* Corruption */
3060
3061         leaf = path->nodes[0];
3062         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3063         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3064         btrfs_mark_buffer_dirty(leaf);
3065         btrfs_release_path(path);
3066 fail:
3067         if (ret) {
3068                 btrfs_abort_transaction(trans, root, ret);
3069                 return ret;
3070         }
3071         return 0;
3073 }
3074
3075 static struct btrfs_block_group_cache *
3076 next_block_group(struct btrfs_root *root,
3077                  struct btrfs_block_group_cache *cache)
3078 {
3079         struct rb_node *node;
3080         spin_lock(&root->fs_info->block_group_cache_lock);
3081         node = rb_next(&cache->cache_node);
3082         btrfs_put_block_group(cache);
3083         if (node) {
3084                 cache = rb_entry(node, struct btrfs_block_group_cache,
3085                                  cache_node);
3086                 btrfs_get_block_group(cache);
3087         } else
3088                 cache = NULL;
3089         spin_unlock(&root->fs_info->block_group_cache_lock);
3090         return cache;
3091 }
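
/*
 * Editor's note: next_block_group() consumes the reference on the
 * group it is handed, so the usual iteration pattern (used by the
 * writeback loops below) is:
 *
 *	cache = btrfs_lookup_first_block_group(fs_info, 0);
 *	while (cache) {
 *		if (interesting(cache))		// hypothetical predicate
 *			break;			// break keeps the reference
 *		cache = next_block_group(root, cache);
 *	}
 *
 * and whoever breaks out early must btrfs_put_block_group() the group
 * it kept.
 */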
3092
3093 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3094                             struct btrfs_trans_handle *trans,
3095                             struct btrfs_path *path)
3096 {
3097         struct btrfs_root *root = block_group->fs_info->tree_root;
3098         struct inode *inode = NULL;
3099         u64 alloc_hint = 0;
3100         int dcs = BTRFS_DC_ERROR;
3101         int num_pages = 0;
3102         int retries = 0;
3103         int ret = 0;
3104
3105         /*
3106          * If this block group is smaller than 100 megs, don't bother
3107          * caching it.
3108          */
3109         if (block_group->key.offset < (100 * 1024 * 1024)) {
3110                 spin_lock(&block_group->lock);
3111                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3112                 spin_unlock(&block_group->lock);
3113                 return 0;
3114         }
3115
3116 again:
3117         inode = lookup_free_space_inode(root, block_group, path);
3118         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3119                 ret = PTR_ERR(inode);
3120                 btrfs_release_path(path);
3121                 goto out;
3122         }
3123
3124         if (IS_ERR(inode)) {
3125                 BUG_ON(retries);
3126                 retries++;
3127
3128                 if (block_group->ro)
3129                         goto out_free;
3130
3131                 ret = create_free_space_inode(root, trans, block_group, path);
3132                 if (ret)
3133                         goto out_free;
3134                 goto again;
3135         }
3136
3137         /* We've already set up this transaction, go ahead and exit */
3138         if (block_group->cache_generation == trans->transid &&
3139             i_size_read(inode)) {
3140                 dcs = BTRFS_DC_SETUP;
3141                 goto out_put;
3142         }
3143
3144         /*
3145          * We want to set the generation to 0, that way if anything goes wrong
3146          * from here on out we know not to trust this cache when we load up next
3147          * time.
3148          */
3149         BTRFS_I(inode)->generation = 0;
3150         ret = btrfs_update_inode(trans, root, inode);
3151         WARN_ON(ret);
3152
3153         if (i_size_read(inode) > 0) {
3154                 ret = btrfs_check_trunc_cache_free_space(root,
3155                                         &root->fs_info->global_block_rsv);
3156                 if (ret)
3157                         goto out_put;
3158
3159                 ret = btrfs_truncate_free_space_cache(root, trans, inode);
3160                 if (ret)
3161                         goto out_put;
3162         }
3163
3164         spin_lock(&block_group->lock);
3165         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3166             !btrfs_test_opt(root, SPACE_CACHE)) {
3167                 /*
3168                  * Don't bother trying to write stuff out if
3169                  * a) we're not cached, or
3170                  * b) we were mounted with the nospace_cache option.
3171                  */
3172                 dcs = BTRFS_DC_WRITTEN;
3173                 spin_unlock(&block_group->lock);
3174                 goto out_put;
3175         }
3176         spin_unlock(&block_group->lock);
3177
3178         /*
3179          * Try to preallocate enough space based on how big the block group is.
3180          * Keep in mind this has to include any pinned space which could end up
3181          * taking up quite a bit since it's not folded into the other space
3182          * cache.
3183          */
3184         num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3185         if (!num_pages)
3186                 num_pages = 1;
3187
3188         num_pages *= 16;
3189         num_pages *= PAGE_CACHE_SIZE;
3190
3191         ret = btrfs_check_data_free_space(inode, num_pages);
3192         if (ret)
3193                 goto out_put;
3194
3195         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3196                                               num_pages, num_pages,
3197                                               &alloc_hint);
3198         if (!ret)
3199                 dcs = BTRFS_DC_SETUP;
3200         btrfs_free_reserved_data_space(inode, num_pages);
3201
3202 out_put:
3203         iput(inode);
3204 out_free:
3205         btrfs_release_path(path);
3206 out:
3207         spin_lock(&block_group->lock);
3208         if (!ret && dcs == BTRFS_DC_SETUP)
3209                 block_group->cache_generation = trans->transid;
3210         block_group->disk_cache_state = dcs;
3211         spin_unlock(&block_group->lock);
3212
3213         return ret;
3214 }
3215
3216 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3217                                    struct btrfs_root *root)
3218 {
3219         struct btrfs_block_group_cache *cache;
3220         int err = 0;
3221         struct btrfs_path *path;
3222         u64 last = 0;
3223
3224         path = btrfs_alloc_path();
3225         if (!path)
3226                 return -ENOMEM;
3227
3228 again:
3229         while (1) {
3230                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3231                 while (cache) {
3232                         if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3233                                 break;
3234                         cache = next_block_group(root, cache);
3235                 }
3236                 if (!cache) {
3237                         if (last == 0)
3238                                 break;
3239                         last = 0;
3240                         continue;
3241                 }
3242                 err = cache_save_setup(cache, trans, path);
3243                 last = cache->key.objectid + cache->key.offset;
3244                 btrfs_put_block_group(cache);
3245         }
3246
3247         while (1) {
3248                 if (last == 0) {
3249                         err = btrfs_run_delayed_refs(trans, root,
3250                                                      (unsigned long)-1);
3251                         if (err) /* File system offline */
3252                                 goto out;
3253                 }
3254
3255                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3256                 while (cache) {
3257                         if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
3258                                 btrfs_put_block_group(cache);
3259                                 goto again;
3260                         }
3261
3262                         if (cache->dirty)
3263                                 break;
3264                         cache = next_block_group(root, cache);
3265                 }
3266                 if (!cache) {
3267                         if (last == 0)
3268                                 break;
3269                         last = 0;
3270                         continue;
3271                 }
3272
3273                 if (cache->disk_cache_state == BTRFS_DC_SETUP)
3274                         cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
3275                 cache->dirty = 0;
3276                 last = cache->key.objectid + cache->key.offset;
3277
3278                 err = write_one_cache_group(trans, root, path, cache);
3279                 btrfs_put_block_group(cache);
3280                 if (err) /* File system offline */
3281                         goto out;
3282         }
3283
3284         while (1) {
3285                 /*
3286                  * I don't think this is needed since we're just marking our
3287                  * preallocated extent as written, but just in case, it can't
3288                  * hurt.
3289                  */
3290                 if (last == 0) {
3291                         err = btrfs_run_delayed_refs(trans, root,
3292                                                      (unsigned long)-1);
3293                         if (err) /* File system offline */
3294                                 goto out;
3295                 }
3296
3297                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3298                 while (cache) {
3299                         /*
3300                          * Really this shouldn't happen, but it could if we
3301                          * couldn't write the entire preallocated extent and
3302                          * splitting the extent resulted in a new block.
3303                          */
3304                         if (cache->dirty) {
3305                                 btrfs_put_block_group(cache);
3306                                 goto again;
3307                         }
3308                         if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3309                                 break;
3310                         cache = next_block_group(root, cache);
3311                 }
3312                 if (!cache) {
3313                         if (last == 0)
3314                                 break;
3315                         last = 0;
3316                         continue;
3317                 }
3318
3319                 err = btrfs_write_out_cache(root, trans, cache, path);
3320
3321                 /*
3322                  * If we didn't have an error then the cache state is still
3323                  * NEED_WRITE, so we can set it to WRITTEN.
3324                  */
3325                 if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3326                         cache->disk_cache_state = BTRFS_DC_WRITTEN;
3327                 last = cache->key.objectid + cache->key.offset;
3328                 btrfs_put_block_group(cache);
3329         }
3330 out:
3331
3332         btrfs_free_path(path);
3333         return err;
3334 }
3335
3336 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3337 {
3338         struct btrfs_block_group_cache *block_group;
3339         int readonly = 0;
3340
3341         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3342         if (!block_group || block_group->ro)
3343                 readonly = 1;
3344         if (block_group)
3345                 btrfs_put_block_group(block_group);
3346         return readonly;
3347 }
3348
3349 static const char *alloc_name(u64 flags)
3350 {
3351         switch (flags) {
3352         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3353                 return "mixed";
3354         case BTRFS_BLOCK_GROUP_METADATA:
3355                 return "metadata";
3356         case BTRFS_BLOCK_GROUP_DATA:
3357                 return "data";
3358         case BTRFS_BLOCK_GROUP_SYSTEM:
3359                 return "system";
3360         default:
3361                 WARN_ON(1);
3362                 return "invalid-combination";
3363         }
3364 }
3365
3366 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3367                              u64 total_bytes, u64 bytes_used,
3368                              struct btrfs_space_info **space_info)
3369 {
3370         struct btrfs_space_info *found;
3371         int i;
3372         int factor;
3373         int ret;
3374
3375         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3376                      BTRFS_BLOCK_GROUP_RAID10))
3377                 factor = 2;
3378         else
3379                 factor = 1;
3380
3381         found = __find_space_info(info, flags);
3382         if (found) {
3383                 spin_lock(&found->lock);
3384                 found->total_bytes += total_bytes;
3385                 found->disk_total += total_bytes * factor;
3386                 found->bytes_used += bytes_used;
3387                 found->disk_used += bytes_used * factor;
3388                 found->full = 0;
3389                 spin_unlock(&found->lock);
3390                 *space_info = found;
3391                 return 0;
3392         }
3393         found = kzalloc(sizeof(*found), GFP_NOFS);
3394         if (!found)
3395                 return -ENOMEM;
3396
3397         ret = percpu_counter_init(&found->total_bytes_pinned, 0);
3398         if (ret) {
3399                 kfree(found);
3400                 return ret;
3401         }
3402
3403         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
3404                 INIT_LIST_HEAD(&found->block_groups[i]);
3405                 kobject_init(&found->block_group_kobjs[i], &btrfs_raid_ktype);
3406         }
3407         init_rwsem(&found->groups_sem);
3408         spin_lock_init(&found->lock);
3409         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3410         found->total_bytes = total_bytes;
3411         found->disk_total = total_bytes * factor;
3412         found->bytes_used = bytes_used;
3413         found->disk_used = bytes_used * factor;
3414         found->bytes_pinned = 0;
3415         found->bytes_reserved = 0;
3416         found->bytes_readonly = 0;
3417         found->bytes_may_use = 0;
3418         found->full = 0;
3419         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3420         found->chunk_alloc = 0;
3421         found->flush = 0;
3422         init_waitqueue_head(&found->wait);
3423
3424         ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3425                                     info->space_info_kobj, "%s",
3426                                     alloc_name(found->flags));
3427         if (ret) {
                /* don't leak the percpu counter initialized above */
                percpu_counter_destroy(&found->total_bytes_pinned);
3428                 kfree(found);
3429                 return ret;
3430         }
3431
3432         *space_info = found;
3433         list_add_rcu(&found->list, &info->space_info);
3434         if (flags & BTRFS_BLOCK_GROUP_DATA)
3435                 info->data_sinfo = found;
3436
3437         return ret;
3438 }
3439
3440 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3441 {
3442         u64 extra_flags = chunk_to_extended(flags) &
3443                                 BTRFS_EXTENDED_PROFILE_MASK;
3444
3445         write_seqlock(&fs_info->profiles_lock);
3446         if (flags & BTRFS_BLOCK_GROUP_DATA)
3447                 fs_info->avail_data_alloc_bits |= extra_flags;
3448         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3449                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3450         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3451                 fs_info->avail_system_alloc_bits |= extra_flags;
3452         write_sequnlock(&fs_info->profiles_lock);
3453 }
3454
3455 /*
3456  * returns target flags in extended format or 0 if restripe for this
3457  * chunk_type is not in progress
3458  *
3459  * should be called with either volume_mutex or balance_lock held
3460  */
3461 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3462 {
3463         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3464         u64 target = 0;
3465
3466         if (!bctl)
3467                 return 0;
3468
3469         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3470             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3471                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3472         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3473                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3474                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3475         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3476                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3477                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3478         }
3479
3480         return target;
3481 }
3482
3483 /*
3484  * @flags: available profiles in extended format (see ctree.h)
3485  *
3486  * Returns reduced profile in chunk format.  If profile changing is in
3487  * progress (either running or paused) picks the target profile (if it's
3488  * already available), otherwise falls back to plain reducing.
3489  */
3490 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3491 {
3492         /*
3493          * we add in the count of missing devices because we want
3494          * to make sure that any RAID levels on a degraded FS
3495          * continue to be honored.
3496          */
3497         u64 num_devices = root->fs_info->fs_devices->rw_devices +
3498                 root->fs_info->fs_devices->missing_devices;
3499         u64 target;
3500         u64 tmp;
3501
3502         /*
3503          * see if restripe for this chunk_type is in progress, if so
3504          * try to reduce to the target profile
3505          */
3506         spin_lock(&root->fs_info->balance_lock);
3507         target = get_restripe_target(root->fs_info, flags);
3508         if (target) {
3509                 /* pick target profile only if it's already available */
3510                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3511                         spin_unlock(&root->fs_info->balance_lock);
3512                         return extended_to_chunk(target);
3513                 }
3514         }
3515         spin_unlock(&root->fs_info->balance_lock);
3516
3517         /* First, mask out the RAID levels which aren't possible */
3518         if (num_devices == 1)
3519                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3520                            BTRFS_BLOCK_GROUP_RAID5);
3521         if (num_devices < 3)
3522                 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3523         if (num_devices < 4)
3524                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3525
3526         tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3527                        BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3528                        BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3529         flags &= ~tmp;
3530
3531         if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3532                 tmp = BTRFS_BLOCK_GROUP_RAID6;
3533         else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3534                 tmp = BTRFS_BLOCK_GROUP_RAID5;
3535         else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3536                 tmp = BTRFS_BLOCK_GROUP_RAID10;
3537         else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3538                 tmp = BTRFS_BLOCK_GROUP_RAID1;
3539         else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3540                 tmp = BTRFS_BLOCK_GROUP_RAID0;
3541
3542         return extended_to_chunk(flags | tmp);
3543 }
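
/*
 * Editor's worked example for the reduction above (no restripe in
 * progress): with two rw devices and available profiles
 * RAID1|RAID10|RAID0, RAID10 is masked out (it needs at least four
 * devices), leaving RAID1|RAID0; RAID1 sits higher in the preference
 * ladder, so the function returns the chunk-format encoding of RAID1.
 */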
3544
3545 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
3546 {
3547         unsigned seq;
3548         u64 flags;
3549
3550         do {
3551                 flags = orig_flags;
3552                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3553
3554                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3555                         flags |= root->fs_info->avail_data_alloc_bits;
3556                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3557                         flags |= root->fs_info->avail_system_alloc_bits;
3558                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3559                         flags |= root->fs_info->avail_metadata_alloc_bits;
3560         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3561
3562         return btrfs_reduce_alloc_profile(root, flags);
3563 }
3564
3565 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3566 {
3567         u64 flags;
3568         u64 ret;
3569
3570         if (data)
3571                 flags = BTRFS_BLOCK_GROUP_DATA;
3572         else if (root == root->fs_info->chunk_root)
3573                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3574         else
3575                 flags = BTRFS_BLOCK_GROUP_METADATA;
3576
3577         ret = get_alloc_profile(root, flags);
3578         return ret;
3579 }
3580
3581 /*
3582  * This will check the space that the inode allocates from to make sure we have
3583  * enough space for bytes.
3584  */
3585 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3586 {
3587         struct btrfs_space_info *data_sinfo;
3588         struct btrfs_root *root = BTRFS_I(inode)->root;
3589         struct btrfs_fs_info *fs_info = root->fs_info;
3590         u64 used;
3591         int ret = 0, committed = 0, alloc_chunk = 1;
3592
3593         /* make sure bytes are sectorsize aligned */
3594         bytes = ALIGN(bytes, root->sectorsize);
3595
3596         if (btrfs_is_free_space_inode(inode)) {
3597                 committed = 1;
3598                 ASSERT(current->journal_info);
3599         }
3600
3601         data_sinfo = fs_info->data_sinfo;
3602         if (!data_sinfo)
3603                 goto alloc;
3604
3605 again:
3606         /* make sure we have enough space to handle the data first */
3607         spin_lock(&data_sinfo->lock);
3608         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3609                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3610                 data_sinfo->bytes_may_use;
3611
3612         if (used + bytes > data_sinfo->total_bytes) {
3613                 struct btrfs_trans_handle *trans;
3614
3615                 /*
3616                  * if we don't have enough free bytes in this space then we need
3617                  * to alloc a new chunk.
3618                  */
3619                 if (!data_sinfo->full && alloc_chunk) {
3620                         u64 alloc_target;
3621
3622                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3623                         spin_unlock(&data_sinfo->lock);
3624 alloc:
3625                         alloc_target = btrfs_get_alloc_profile(root, 1);
3626                         /*
3627                          * It is ugly that we don't call the nolock join
3628                          * transaction for the free space inode case here.
3629                          * But it is safe because we only do the data space
3630                          * reservation for the free space cache in the
3631                          * transaction context: the common join transaction
3632                          * just increases the use count of the current
3633                          * transaction handle and doesn't try to acquire
3634                          * the trans_lock of the fs.
3635                          */
3636                         trans = btrfs_join_transaction(root);
3637                         if (IS_ERR(trans))
3638                                 return PTR_ERR(trans);
3639
3640                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3641                                              alloc_target,
3642                                              CHUNK_ALLOC_NO_FORCE);
3643                         btrfs_end_transaction(trans, root);
3644                         if (ret < 0) {
3645                                 if (ret != -ENOSPC)
3646                                         return ret;
3647                                 else
3648                                         goto commit_trans;
3649                         }
3650
3651                         if (!data_sinfo)
3652                                 data_sinfo = fs_info->data_sinfo;
3653
3654                         goto again;
3655                 }
3656
3657                 /*
3658                  * If we don't have enough pinned space to deal with this
3659                  * allocation don't bother committing the transaction.
3660                  */
3661                 if (percpu_counter_compare(&data_sinfo->total_bytes_pinned,
3662                                            bytes) < 0)
3663                         committed = 1;
3664                 spin_unlock(&data_sinfo->lock);
3665
3666                 /* commit the current transaction and try again */
3667 commit_trans:
3668                 if (!committed &&
3669                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3670                         committed = 1;
3671
3672                         trans = btrfs_join_transaction(root);
3673                         if (IS_ERR(trans))
3674                                 return PTR_ERR(trans);
3675                         ret = btrfs_commit_transaction(trans, root);
3676                         if (ret)
3677                                 return ret;
3678                         goto again;
3679                 }
3680
3681                 trace_btrfs_space_reservation(root->fs_info,
3682                                               "space_info:enospc",
3683                                               data_sinfo->flags, bytes, 1);
3684                 return -ENOSPC;
3685         }
3686         data_sinfo->bytes_may_use += bytes;
3687         trace_btrfs_space_reservation(root->fs_info, "space_info",
3688                                       data_sinfo->flags, bytes, 1);
3689         spin_unlock(&data_sinfo->lock);
3690
3691         return 0;
3692 }
3693
3694 /*
3695  * Called if we need to clear a data reservation for this inode.
3696  */
3697 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3698 {
3699         struct btrfs_root *root = BTRFS_I(inode)->root;
3700         struct btrfs_space_info *data_sinfo;
3701
3702         /* make sure bytes are sectorsize aligned */
3703         bytes = ALIGN(bytes, root->sectorsize);
3704
3705         data_sinfo = root->fs_info->data_sinfo;
3706         spin_lock(&data_sinfo->lock);
3707         WARN_ON(data_sinfo->bytes_may_use < bytes);
3708         data_sinfo->bytes_may_use -= bytes;
3709         trace_btrfs_space_reservation(root->fs_info, "space_info",
3710                                       data_sinfo->flags, bytes, 0);
3711         spin_unlock(&data_sinfo->lock);
3712 }
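
/*
 * Editor's note: the two helpers above form a reserve/release pair
 * around data allocations; cache_save_setup() earlier in this file is
 * a concrete in-file example:
 *
 *	ret = btrfs_check_data_free_space(inode, num_pages);
 *	if (ret)
 *		goto out_put;
 *	ret = btrfs_prealloc_file_range_trans(...);
 *	btrfs_free_reserved_data_space(inode, num_pages);
 *
 * i.e. the bytes_may_use reservation is dropped again once real
 * extents have been allocated (or the attempt abandoned).
 */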
3713
3714 static void force_metadata_allocation(struct btrfs_fs_info *info)
3715 {
3716         struct list_head *head = &info->space_info;
3717         struct btrfs_space_info *found;
3718
3719         rcu_read_lock();
3720         list_for_each_entry_rcu(found, head, list) {
3721                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3722                         found->force_alloc = CHUNK_ALLOC_FORCE;
3723         }
3724         rcu_read_unlock();
3725 }
3726
3727 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
3728 {
3729         return (global->size << 1);
3730 }
3731
3732 static int should_alloc_chunk(struct btrfs_root *root,
3733                               struct btrfs_space_info *sinfo, int force)
3734 {
3735         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3736         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3737         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3738         u64 thresh;
3739
3740         if (force == CHUNK_ALLOC_FORCE)
3741                 return 1;
3742
3743         /*
3744          * We need to take into account the global rsv because for all intents
3745          * and purposes it's used space.  Don't worry about locking the
3746          * global_rsv, it doesn't change except when the transaction commits.
3747          */
3748         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3749                 num_allocated += calc_global_rsv_need_space(global_rsv);
3750
3751         /*
3752          * in limited mode, we want to have some free space up to
3753          * about 1% of the FS size.
3754          */
3755         if (force == CHUNK_ALLOC_LIMITED) {
3756                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3757                 thresh = max_t(u64, 64 * 1024 * 1024,
3758                                div_factor_fine(thresh, 1));
3759
3760                 if (num_bytes - num_allocated < thresh)
3761                         return 1;
3762         }
3763
3764         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3765                 return 0;
3766         return 1;
3767 }
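
/*
 * Editor's worked example for the CHUNK_ALLOC_LIMITED branch above: on
 * a 1TiB filesystem the threshold is max(64MiB, 1% of 1TiB), roughly
 * 10GiB, so a limited allocation is allowed only while the space left
 * unallocated in this space_info is below that figure.
 */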
3768
3769 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3770 {
3771         u64 num_dev;
3772
3773         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
3774                     BTRFS_BLOCK_GROUP_RAID0 |
3775                     BTRFS_BLOCK_GROUP_RAID5 |
3776                     BTRFS_BLOCK_GROUP_RAID6))
3777                 num_dev = root->fs_info->fs_devices->rw_devices;
3778         else if (type & BTRFS_BLOCK_GROUP_RAID1)
3779                 num_dev = 2;
3780         else
3781                 num_dev = 1;    /* DUP or single */
3782
3783         /* metadata for updating devices and the chunk tree */
3784         return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3785 }
3786
3787 static void check_system_chunk(struct btrfs_trans_handle *trans,
3788                                struct btrfs_root *root, u64 type)
3789 {
3790         struct btrfs_space_info *info;
3791         u64 left;
3792         u64 thresh;
3793
3794         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3795         spin_lock(&info->lock);
3796         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3797                 info->bytes_reserved - info->bytes_readonly;
3798         spin_unlock(&info->lock);
3799
3800         thresh = get_system_chunk_thresh(root, type);
3801         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3802                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
3803                         left, thresh, type);
3804                 dump_space_info(info, 0, 0);
3805         }
3806
3807         if (left < thresh) {
3808                 u64 flags;
3809
3810                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3811                 btrfs_alloc_chunk(trans, root, flags);
3812         }
3813 }
3814
3815 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3816                           struct btrfs_root *extent_root, u64 flags, int force)
3817 {
3818         struct btrfs_space_info *space_info;
3819         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3820         int wait_for_alloc = 0;
3821         int ret = 0;
3822
3823         /* Don't re-enter if we're already allocating a chunk */
3824         if (trans->allocating_chunk)
3825                 return -ENOSPC;
3826
3827         space_info = __find_space_info(extent_root->fs_info, flags);
3828         if (!space_info) {
3829                 ret = update_space_info(extent_root->fs_info, flags,
3830                                         0, 0, &space_info);
3831                 BUG_ON(ret); /* -ENOMEM */
3832         }
3833         BUG_ON(!space_info); /* Logic error */
3834
3835 again:
3836         spin_lock(&space_info->lock);
3837         if (force < space_info->force_alloc)
3838                 force = space_info->force_alloc;
3839         if (space_info->full) {
3840                 if (should_alloc_chunk(extent_root, space_info, force))
3841                         ret = -ENOSPC;
3842                 else
3843                         ret = 0;
3844                 spin_unlock(&space_info->lock);
3845                 return ret;
3846         }
3847
3848         if (!should_alloc_chunk(extent_root, space_info, force)) {
3849                 spin_unlock(&space_info->lock);
3850                 return 0;
3851         } else if (space_info->chunk_alloc) {
3852                 wait_for_alloc = 1;
3853         } else {
3854                 space_info->chunk_alloc = 1;
3855         }
3856
3857         spin_unlock(&space_info->lock);
3858
3859         mutex_lock(&fs_info->chunk_mutex);
3860
3861         /*
3862          * The chunk_mutex is held throughout the entirety of a chunk
3863          * allocation, so once we've acquired the chunk_mutex we know that the
3864          * other guy is done and we need to recheck and see if we should
3865          * allocate.
3866          */
3867         if (wait_for_alloc) {
3868                 mutex_unlock(&fs_info->chunk_mutex);
3869                 wait_for_alloc = 0;
3870                 goto again;
3871         }
3872
3873         trans->allocating_chunk = true;
3874
3875         /*
3876          * If we have mixed data/metadata chunks we want to make sure we keep
3877          * allocating mixed chunks instead of individual chunks.
3878          */
3879         if (btrfs_mixed_space_info(space_info))
3880                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3881
3882         /*
3883          * if we're doing a data chunk, go ahead and make sure that
3884          * we keep a reasonable number of metadata chunks allocated in the
3885          * FS as well.
3886          */
3887         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3888                 fs_info->data_chunk_allocations++;
3889                 if (!(fs_info->data_chunk_allocations %
3890                       fs_info->metadata_ratio))
3891                         force_metadata_allocation(fs_info);
3892         }
3893
3894         /*
3895          * Check if we have enough space in SYSTEM chunk because we may need
3896          * to update devices.
3897          */
3898         check_system_chunk(trans, extent_root, flags);
3899
3900         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3901         trans->allocating_chunk = false;
3902
3903         spin_lock(&space_info->lock);
3904         if (ret < 0 && ret != -ENOSPC)
3905                 goto out;
3906         if (ret)
3907                 space_info->full = 1;
3908         else
3909                 ret = 1;
3910
3911         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3912 out:
3913         space_info->chunk_alloc = 0;
3914         spin_unlock(&space_info->lock);
3915         mutex_unlock(&fs_info->chunk_mutex);
3916         return ret;
3917 }
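
/*
 * Editor's note on the return convention above: 1 means a chunk was
 * allocated, 0 means none was needed, -ENOSPC means the allocation
 * failed for space (and the space_info was marked full), and any other
 * negative value is a hard error.  Callers therefore tend to look like
 * (hedged sketch):
 *
 *	ret = do_chunk_alloc(trans, extent_root, flags, CHUNK_ALLOC_FORCE);
 *	if (ret < 0 && ret != -ENOSPC)
 *		btrfs_abort_transaction(trans, extent_root, ret);
 */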
3918
3919 static int can_overcommit(struct btrfs_root *root,
3920                           struct btrfs_space_info *space_info, u64 bytes,
3921                           enum btrfs_reserve_flush_enum flush)
3922 {
3923         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3924         u64 profile = btrfs_get_alloc_profile(root, 0);
3925         u64 space_size;
3926         u64 avail;
3927         u64 used;
3928
3929         used = space_info->bytes_used + space_info->bytes_reserved +
3930                 space_info->bytes_pinned + space_info->bytes_readonly;
3931
3932         /*
3933          * We only want to allow overcommitting if we have lots of actual space
3934          * free, but if we don't have enough space to handle the global reserve
3935          * space then we could end up having a real enospc problem when trying
3936          * to allocate a chunk or some other such important allocation.
3937          */
3938         spin_lock(&global_rsv->lock);
3939         space_size = calc_global_rsv_need_space(global_rsv);
3940         spin_unlock(&global_rsv->lock);
3941         if (used + space_size >= space_info->total_bytes)
3942                 return 0;
3943
3944         used += space_info->bytes_may_use;
3945
3946         spin_lock(&root->fs_info->free_chunk_lock);
3947         avail = root->fs_info->free_chunk_space;
3948         spin_unlock(&root->fs_info->free_chunk_lock);
3949
3950         /*
3951          * If we have dup, raid1 or raid10 then only half of the free
3952          * space is actually usable.  For raid56, the space info used
3953          * doesn't include the parity drive, so we don't have to
3954          * change the math
3955          */
3956         if (profile & (BTRFS_BLOCK_GROUP_DUP |
3957                        BTRFS_BLOCK_GROUP_RAID1 |
3958                        BTRFS_BLOCK_GROUP_RAID10))
3959                 avail >>= 1;
3960
3961         /*
3962          * If we aren't flushing everything, let us overcommit up to
3963          * half of the space.  If we can flush, don't let us overcommit
3964          * too much; limit the overcommit to 1/8 of the space.
3965          */
3966         if (flush == BTRFS_RESERVE_FLUSH_ALL)
3967                 avail >>= 3;
3968         else
3969                 avail >>= 1;
3970
3971         if (used + bytes < space_info->total_bytes + avail)
3972                 return 1;
3973         return 0;
3974 }
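
/*
 * Editor's worked example for the math above: with 16GiB of free chunk
 * space and a RAID1 profile, avail is halved to 8GiB for the mirror
 * copies; a BTRFS_RESERVE_FLUSH_ALL caller may then overcommit by a
 * further 1/8 of that (1GiB), while a no-flush caller may overcommit
 * by half of it (4GiB).
 */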
3975
3976 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
3977                                          unsigned long nr_pages, int nr_items)
3978 {
3979         struct super_block *sb = root->fs_info->sb;
3980
3981         if (down_read_trylock(&sb->s_umount)) {
3982                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
3983                 up_read(&sb->s_umount);
3984         } else {
3985                 /*
3986                  * We needn't worry about the filesystem going from r/w to r/o
3987                  * even though we don't acquire the ->s_umount mutex, because
3988                  * the filesystem should guarantee that the delalloc inode list
3989                  * is empty once the filesystem is read-only (all dirty pages
3990                  * have been written to disk).
3991                  */
3992                 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
3993                 if (!current->journal_info)
3994                         btrfs_wait_ordered_roots(root->fs_info, nr_items);
3995         }
3996 }
3997
3998 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
3999 {
4000         u64 bytes;
4001         int nr;
4002
4003         bytes = btrfs_calc_trans_metadata_size(root, 1);
4004         nr = (int)div64_u64(to_reclaim, bytes);
4005         if (!nr)
4006                 nr = 1;
4007         return nr;
4008 }
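
/*
 * Editor's note: btrfs_calc_trans_metadata_size(root, 1) is the
 * worst-case metadata cost of touching a single item (a full tree path
 * with room for splits), so nr is "how many items' worth of metadata
 * we need back".  shrink_delalloc() below converts that item count
 * into bytes of delalloc to flush at EXTENT_SIZE_PER_ITEM (256K) per
 * item.
 */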
4009
4010 #define EXTENT_SIZE_PER_ITEM    (256 * 1024)
4011
4012 /*
4013  * shrink metadata reservation for delalloc
4014  */
4015 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4016                             bool wait_ordered)
4017 {
4018         struct btrfs_block_rsv *block_rsv;
4019         struct btrfs_space_info *space_info;
4020         struct btrfs_trans_handle *trans;
4021         u64 delalloc_bytes;
4022         u64 max_reclaim;
4023         long time_left;
4024         unsigned long nr_pages;
4025         int loops;
4026         int items;
4027         enum btrfs_reserve_flush_enum flush;
4028
4029         /* Calc the number of items we need to flush for this space reservation */
4030         items = calc_reclaim_items_nr(root, to_reclaim);
4031         to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4032
4033         trans = (struct btrfs_trans_handle *)current->journal_info;
4034         block_rsv = &root->fs_info->delalloc_block_rsv;
4035         space_info = block_rsv->space_info;
4036
4037         delalloc_bytes = percpu_counter_sum_positive(
4038                                                 &root->fs_info->delalloc_bytes);
4039         if (delalloc_bytes == 0) {
4040                 if (trans)
4041                         return;
4042                 if (wait_ordered)
4043                         btrfs_wait_ordered_roots(root->fs_info, items);
4044                 return;
4045         }
4046
4047         loops = 0;
4048         while (delalloc_bytes && loops < 3) {
4049                 max_reclaim = min(delalloc_bytes, to_reclaim);
4050                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4051                 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4052                 /*
4053                  * We need to wait for the async pages to actually start before
4054                  * we do anything.
4055                  */
4056                 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4057                 if (!max_reclaim)
4058                         goto skip_async;
4059
4060                 if (max_reclaim <= nr_pages)
4061                         max_reclaim = 0;
4062                 else
4063                         max_reclaim -= nr_pages;
4064
4065                 wait_event(root->fs_info->async_submit_wait,
4066                            atomic_read(&root->fs_info->async_delalloc_pages) <=
4067                            (int)max_reclaim);
4068 skip_async:
4069                 if (!trans)
4070                         flush = BTRFS_RESERVE_FLUSH_ALL;
4071                 else
4072                         flush = BTRFS_RESERVE_NO_FLUSH;
4073                 spin_lock(&space_info->lock);
4074                 if (can_overcommit(root, space_info, orig, flush)) {
4075                         spin_unlock(&space_info->lock);
4076                         break;
4077                 }
4078                 spin_unlock(&space_info->lock);
4079
4080                 loops++;
4081                 if (wait_ordered && !trans) {
4082                         btrfs_wait_ordered_roots(root->fs_info, items);
4083                 } else {
4084                         time_left = schedule_timeout_killable(1);
4085                         if (time_left)
4086                                 break;
4087                 }
4088                 delalloc_bytes = percpu_counter_sum_positive(
4089                                                 &root->fs_info->delalloc_bytes);
4090         }
4091 }
4092
4093 /**
4094  * may_commit_transaction - possibly commit the transaction if it's ok to
4095  * @root - the root we're allocating for
4096  * @space_info - the space_info we're allocating from
4097  * @bytes - the number of bytes we want to reserve
4098  * @force - force the commit
4099  *
4100  * This will check whether committing the transaction will actually get us
4101  * somewhere and then commit it if so.  Otherwise it will return -ENOSPC.
4102  */
4103 static int may_commit_transaction(struct btrfs_root *root,
4104                                   struct btrfs_space_info *space_info,
4105                                   u64 bytes, int force)
4106 {
4107         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4108         struct btrfs_trans_handle *trans;
4109
4110         trans = (struct btrfs_trans_handle *)current->journal_info;
4111         if (trans)
4112                 return -EAGAIN;
4113
4114         if (force)
4115                 goto commit;
4116
4117         /* See if there is enough pinned space to make this reservation */
4118         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4119                                    bytes) >= 0)
4120                 goto commit;
4121
4122         /*
4123          * See if the delayed insertion reservation can cover the rest: a
4124          * commit only helps if pinned plus the delayed rsv cover the request.
4125          */
4126         if (space_info != delayed_rsv->space_info)
4127                 return -ENOSPC;
4128
4129         spin_lock(&delayed_rsv->lock);
4130         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4131                                    bytes - delayed_rsv->size) < 0) {
4132                 spin_unlock(&delayed_rsv->lock);
4133                 return -ENOSPC;
4134         }
4135         spin_unlock(&delayed_rsv->lock);
4136
4137 commit:
4138         trans = btrfs_join_transaction(root);
4139         if (IS_ERR(trans))
4140                 return -ENOSPC;
4141
4142         return btrfs_commit_transaction(trans, root);
4143 }
4144
4145 enum flush_state {
4146         FLUSH_DELAYED_ITEMS_NR  =       1,
4147         FLUSH_DELAYED_ITEMS     =       2,
4148         FLUSH_DELALLOC          =       3,
4149         FLUSH_DELALLOC_WAIT     =       4,
4150         ALLOC_CHUNK             =       5,
4151         COMMIT_TRANS            =       6,
4152 };
4153
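/*
 * Run one state of the flushing state machine above.  The states escalate
 * in cost: first run (some of) the delayed items to free their metadata
 * reservations, then flush and optionally wait on delalloc, then try to
 * allocate a new chunk, and as a last resort commit the transaction.
 */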
4154 static int flush_space(struct btrfs_root *root,
4155                        struct btrfs_space_info *space_info, u64 num_bytes,
4156                        u64 orig_bytes, int state)
4157 {
4158         struct btrfs_trans_handle *trans;
4159         int nr;
4160         int ret = 0;
4161
4162         switch (state) {
4163         case FLUSH_DELAYED_ITEMS_NR:
4164         case FLUSH_DELAYED_ITEMS:
4165                 if (state == FLUSH_DELAYED_ITEMS_NR)
4166                         nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4167                 else
4168                         nr = -1;
4169
4170                 trans = btrfs_join_transaction(root);
4171                 if (IS_ERR(trans)) {
4172                         ret = PTR_ERR(trans);
4173                         break;
4174                 }
4175                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4176                 btrfs_end_transaction(trans, root);
4177                 break;
4178         case FLUSH_DELALLOC:
4179         case FLUSH_DELALLOC_WAIT:
4180                 shrink_delalloc(root, num_bytes * 2, orig_bytes,
4181                                 state == FLUSH_DELALLOC_WAIT);
4182                 break;
4183         case ALLOC_CHUNK:
4184                 trans = btrfs_join_transaction(root);
4185                 if (IS_ERR(trans)) {
4186                         ret = PTR_ERR(trans);
4187                         break;
4188                 }
4189                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4190                                      btrfs_get_alloc_profile(root, 0),
4191                                      CHUNK_ALLOC_NO_FORCE);
4192                 btrfs_end_transaction(trans, root);
4193                 if (ret == -ENOSPC)
4194                         ret = 0;
4195                 break;
4196         case COMMIT_TRANS:
4197                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4198                 break;
4199         default:
4200                 ret = -ENOSPC;
4201                 break;
4202         }
4203
4204         return ret;
4205 }
4206
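/*
 * Estimate how many bytes of metadata space the async reclaim worker should
 * try to reclaim.  If a small reservation would still fit via overcommit
 * there is nothing to do; otherwise aim to bring usage back under roughly
 * 90-95% of total_bytes, clamped to what is actually reserved or may be
 * used.
 */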
4207 static inline u64
4208 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4209                                  struct btrfs_space_info *space_info)
4210 {
4211         u64 used;
4212         u64 expected;
4213         u64 to_reclaim;
4214
4215         to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024,
4216                                 16 * 1024 * 1024);
4217         spin_lock(&space_info->lock);
4218         if (can_overcommit(root, space_info, to_reclaim,
4219                            BTRFS_RESERVE_FLUSH_ALL)) {
4220                 to_reclaim = 0;
4221                 goto out;
4222         }
4223
4224         used = space_info->bytes_used + space_info->bytes_reserved +
4225                space_info->bytes_pinned + space_info->bytes_readonly +
4226                space_info->bytes_may_use;
4227         if (can_overcommit(root, space_info, 1024 * 1024,
4228                            BTRFS_RESERVE_FLUSH_ALL))
4229                 expected = div_factor_fine(space_info->total_bytes, 95);
4230         else
4231                 expected = div_factor_fine(space_info->total_bytes, 90);
4232
4233         if (used > expected)
4234                 to_reclaim = used - expected;
4235         else
4236                 to_reclaim = 0;
4237         to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4238                                      space_info->bytes_reserved);
4239 out:
4240         spin_unlock(&space_info->lock);
4241
4242         return to_reclaim;
4243 }
4244
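/*
 * Background reclaim kicks in once usage crosses ~98% of the space_info,
 * unless the filesystem is shutting down or being remounted.
 */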
4245 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4246                                         struct btrfs_fs_info *fs_info, u64 used)
4247 {
4248         return (used >= div_factor_fine(space_info->total_bytes, 98) &&
4249                 !btrfs_fs_closing(fs_info) &&
4250                 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
4251 }
4252
4253 static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
4254                                        struct btrfs_fs_info *fs_info)
4255 {
4256         u64 used;
4257
4258         spin_lock(&space_info->lock);
4259         used = space_info->bytes_used + space_info->bytes_reserved +
4260                space_info->bytes_pinned + space_info->bytes_readonly +
4261                space_info->bytes_may_use;
4262         if (need_do_async_reclaim(space_info, fs_info, used)) {
4263                 spin_unlock(&space_info->lock);
4264                 return 1;
4265         }
4266         spin_unlock(&space_info->lock);
4267
4268         return 0;
4269 }
4270
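/*
 * Worker for fs_info->async_reclaim_work: walk the flush states from
 * cheapest to most expensive until enough metadata space is reclaimed,
 * re-queueing itself while the space_info stays above the reclaim
 * threshold.
 */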
4271 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4272 {
4273         struct btrfs_fs_info *fs_info;
4274         struct btrfs_space_info *space_info;
4275         u64 to_reclaim;
4276         int flush_state;
4277
4278         fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4279         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4280
4281         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4282                                                       space_info);
4283         if (!to_reclaim)
4284                 return;
4285
4286         flush_state = FLUSH_DELAYED_ITEMS_NR;
4287         do {
4288                 flush_space(fs_info->fs_root, space_info, to_reclaim,
4289                             to_reclaim, flush_state);
4290                 flush_state++;
4291                 if (!btrfs_need_do_async_reclaim(space_info, fs_info))
4292                         return;
4293         } while (flush_state <= COMMIT_TRANS);
4294
4295         if (btrfs_need_do_async_reclaim(space_info, fs_info))
4296                 queue_work(system_unbound_wq, work);
4297 }
4298
4299 void btrfs_init_async_reclaim_work(struct work_struct *work)
4300 {
4301         INIT_WORK(work, btrfs_async_reclaim_metadata_space);
4302 }
4303
4304 /**
4305  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4306  * @root - the root we're allocating for
4307  * @block_rsv - the block_rsv we're allocating for
4308  * @orig_bytes - the number of bytes we want
4309  * @flush - whether or not we can flush to make our reservation
4310  *
4311  * This will reserve orig_bytes number of bytes from the space info associated
4312  * with the block_rsv.  If there is not enough space it will make an attempt to
4313  * flush out space to make room.  It will do this by flushing delalloc if
4314  * possible or committing the transaction.  If flush is BTRFS_RESERVE_NO_FLUSH
4315  * then no attempt to regain reservations will be made and this will fail if
4316  * there is not enough space already.
4317  */
4318 static int reserve_metadata_bytes(struct btrfs_root *root,
4319                                   struct btrfs_block_rsv *block_rsv,
4320                                   u64 orig_bytes,
4321                                   enum btrfs_reserve_flush_enum flush)
4322 {
4323         struct btrfs_space_info *space_info = block_rsv->space_info;
4324         u64 used;
4325         u64 num_bytes = orig_bytes;
4326         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4327         int ret = 0;
4328         bool flushing = false;
4329
4330 again:
4331         ret = 0;
4332         spin_lock(&space_info->lock);
4333         /*
4334          * We only want to wait if somebody other than us is flushing and we
4335          * are actually allowed to flush all things.
4336          */
4337         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4338                space_info->flush) {
4339                 spin_unlock(&space_info->lock);
4340                 /*
4341                  * If we have a trans handle we can't wait because the flusher
4342                  * may have to commit the transaction, which would mean we would
4343                  * deadlock since we are waiting for the flusher to finish, but
4344                  * hold the current transaction open.
4345                  */
4346                 if (current->journal_info)
4347                         return -EAGAIN;
4348                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4349                 /* Must have been killed, return */
4350                 if (ret)
4351                         return -EINTR;
4352
4353                 spin_lock(&space_info->lock);
4354         }
4355
4356         ret = -ENOSPC;
4357         used = space_info->bytes_used + space_info->bytes_reserved +
4358                 space_info->bytes_pinned + space_info->bytes_readonly +
4359                 space_info->bytes_may_use;
4360
4361         /*
4362          * The idea here is that if we've not already over-reserved the space
4363          * info then we can go ahead and save our reservation first and then
4364          * start flushing if we need to.  Otherwise, if we've already
4365          * overcommitted, let's start flushing stuff first and then come back
4366          * and try to make our reservation.
4367          */
4368         if (used <= space_info->total_bytes) {
4369                 if (used + orig_bytes <= space_info->total_bytes) {
4370                         space_info->bytes_may_use += orig_bytes;
4371                         trace_btrfs_space_reservation(root->fs_info,
4372                                 "space_info", space_info->flags, orig_bytes, 1);
4373                         ret = 0;
4374                 } else {
4375                         /*
4376                          * Ok, set num_bytes to orig_bytes since we aren't
4377                          * overcommitted; this way we only try to reclaim what
4378                          * we need.
4379                          */
4380                         num_bytes = orig_bytes;
4381                 }
4382         } else {
4383                 /*
4384                  * Ok, we're overcommitted; set num_bytes to the overcommitted
4385                  * amount plus the number of bytes that we need for this
4386                  * reservation.
4387                  */
4388                 num_bytes = used - space_info->total_bytes +
4389                         (orig_bytes * 2);
4390         }
4391
4392         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4393                 space_info->bytes_may_use += orig_bytes;
4394                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4395                                               space_info->flags, orig_bytes,
4396                                               1);
4397                 ret = 0;
4398         }
4399
4400         /*
4401          * Couldn't make our reservation, save our place so while we're trying
4402          * to reclaim space we can actually use it instead of somebody else
4403          * stealing it from us.
4404          *
4405          * We make the other tasks wait for the flush only when we can flush
4406          * all things.
4407          */
4408         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4409                 flushing = true;
4410                 space_info->flush = 1;
4411         } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
4412                 used += orig_bytes;
4413                 if (need_do_async_reclaim(space_info, root->fs_info, used) &&
4414                     !work_busy(&root->fs_info->async_reclaim_work))
4415                         queue_work(system_unbound_wq,
4416                                    &root->fs_info->async_reclaim_work);
4417         }
4418         spin_unlock(&space_info->lock);
4419
4420         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4421                 goto out;
4422
4423         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4424                           flush_state);
4425         flush_state++;
4426
4427         /*
4428          * If we are BTRFS_RESERVE_FLUSH_LIMIT we cannot flush delalloc, or a
4429          * deadlock could happen, so skip the delalloc flush states.
4430          */
4431         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4432             (flush_state == FLUSH_DELALLOC ||
4433              flush_state == FLUSH_DELALLOC_WAIT))
4434                 flush_state = ALLOC_CHUNK;
4435
4436         if (!ret)
4437                 goto again;
4438         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4439                  flush_state < COMMIT_TRANS)
4440                 goto again;
4441         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4442                  flush_state <= COMMIT_TRANS)
4443                 goto again;
4444
4445 out:
4446         if (ret == -ENOSPC &&
4447             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4448                 struct btrfs_block_rsv *global_rsv =
4449                         &root->fs_info->global_block_rsv;
4450
4451                 if (block_rsv != global_rsv &&
4452                     !block_rsv_use_bytes(global_rsv, orig_bytes))
4453                         ret = 0;
4454         }
4455         if (ret == -ENOSPC)
4456                 trace_btrfs_space_reservation(root->fs_info,
4457                                               "space_info:enospc",
4458                                               space_info->flags, orig_bytes, 1);
4459         if (flushing) {
4460                 spin_lock(&space_info->lock);
4461                 space_info->flush = 0;
4462                 wake_up_all(&space_info->wait);
4463                 spin_unlock(&space_info->lock);
4464         }
4465         return ret;
4466 }
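/*
 * Typical usage (an illustrative sketch, not a call site from this file):
 *
 *	ret = reserve_metadata_bytes(root, block_rsv, num_bytes,
 *				     BTRFS_RESERVE_FLUSH_ALL);
 *	if (!ret)
 *		block_rsv_add_bytes(block_rsv, num_bytes, 1);
 *
 * which is exactly the pattern btrfs_block_rsv_add() below wraps up.
 */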
4467
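/*
 * Pick the block_rsv that should back an allocation for this transaction
 * and root: the transaction's rsv for COW-able roots (and for csum/uuid
 * tree updates), else the root's own rsv, else the empty rsv.
 */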
4468 static struct btrfs_block_rsv *get_block_rsv(
4469                                         const struct btrfs_trans_handle *trans,
4470                                         const struct btrfs_root *root)
4471 {
4472         struct btrfs_block_rsv *block_rsv = NULL;
4473
4474         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4475                 block_rsv = trans->block_rsv;
4476
4477         if (root == root->fs_info->csum_root && trans->adding_csums)
4478                 block_rsv = trans->block_rsv;
4479
4480         if (root == root->fs_info->uuid_root)
4481                 block_rsv = trans->block_rsv;
4482
4483         if (!block_rsv)
4484                 block_rsv = root->block_rsv;
4485
4486         if (!block_rsv)
4487                 block_rsv = &root->fs_info->empty_block_rsv;
4488
4489         return block_rsv;
4490 }
4491
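/*
 * Consume num_bytes of an existing reservation.  Fails with -ENOSPC rather
 * than letting ->reserved go negative; the matching refill path is
 * block_rsv_add_bytes() below.
 */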
4492 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4493                                u64 num_bytes)
4494 {
4495         int ret = -ENOSPC;
4496         spin_lock(&block_rsv->lock);
4497         if (block_rsv->reserved >= num_bytes) {
4498                 block_rsv->reserved -= num_bytes;
4499                 if (block_rsv->reserved < block_rsv->size)
4500                         block_rsv->full = 0;
4501                 ret = 0;
4502         }
4503         spin_unlock(&block_rsv->lock);
4504         return ret;
4505 }
4506
4507 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4508                                 u64 num_bytes, int update_size)
4509 {
4510         spin_lock(&block_rsv->lock);
4511         block_rsv->reserved += num_bytes;
4512         if (update_size)
4513                 block_rsv->size += num_bytes;
4514         else if (block_rsv->reserved >= block_rsv->size)
4515                 block_rsv->full = 1;
4516         spin_unlock(&block_rsv->lock);
4517 }
4518
4519 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
4520                              struct btrfs_block_rsv *dest, u64 num_bytes,
4521                              int min_factor)
4522 {
4523         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4524         u64 min_bytes;
4525
4526         if (global_rsv->space_info != dest->space_info)
4527                 return -ENOSPC;
4528
4529         spin_lock(&global_rsv->lock);
4530         min_bytes = div_factor(global_rsv->size, min_factor);
4531         if (global_rsv->reserved < min_bytes + num_bytes) {
4532                 spin_unlock(&global_rsv->lock);
4533                 return -ENOSPC;
4534         }
4535         global_rsv->reserved -= num_bytes;
4536         if (global_rsv->reserved < global_rsv->size)
4537                 global_rsv->full = 0;
4538         spin_unlock(&global_rsv->lock);
4539
4540         block_rsv_add_bytes(dest, num_bytes, 1);
4541         return 0;
4542 }
4543
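/*
 * Shrink block_rsv->size by num_bytes ((u64)-1 means the whole rsv) and
 * hand any excess reserved bytes either to @dest (commonly the global
 * rsv) or back to the space_info's bytes_may_use accounting.
 */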
4544 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4545                                     struct btrfs_block_rsv *block_rsv,
4546                                     struct btrfs_block_rsv *dest, u64 num_bytes)
4547 {
4548         struct btrfs_space_info *space_info = block_rsv->space_info;
4549
4550         spin_lock(&block_rsv->lock);
4551         if (num_bytes == (u64)-1)
4552                 num_bytes = block_rsv->size;
4553         block_rsv->size -= num_bytes;
4554         if (block_rsv->reserved >= block_rsv->size) {
4555                 num_bytes = block_rsv->reserved - block_rsv->size;
4556                 block_rsv->reserved = block_rsv->size;
4557                 block_rsv->full = 1;
4558         } else {
4559                 num_bytes = 0;
4560         }
4561         spin_unlock(&block_rsv->lock);
4562
4563         if (num_bytes > 0) {
4564                 if (dest) {
4565                         spin_lock(&dest->lock);
4566                         if (!dest->full) {
4567                                 u64 bytes_to_add;
4568
4569                                 bytes_to_add = dest->size - dest->reserved;
4570                                 bytes_to_add = min(num_bytes, bytes_to_add);
4571                                 dest->reserved += bytes_to_add;
4572                                 if (dest->reserved >= dest->size)
4573                                         dest->full = 1;
4574                                 num_bytes -= bytes_to_add;
4575                         }
4576                         spin_unlock(&dest->lock);
4577                 }
4578                 if (num_bytes) {
4579                         spin_lock(&space_info->lock);
4580                         space_info->bytes_may_use -= num_bytes;
4581                         trace_btrfs_space_reservation(fs_info, "space_info",
4582                                         space_info->flags, num_bytes, 0);
4583                         spin_unlock(&space_info->lock);
4584                 }
4585         }
4586 }
4587
4588 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4589                                    struct btrfs_block_rsv *dst, u64 num_bytes)
4590 {
4591         int ret;
4592
4593         ret = block_rsv_use_bytes(src, num_bytes);
4594         if (ret)
4595                 return ret;
4596
4597         block_rsv_add_bytes(dst, num_bytes, 1);
4598         return 0;
4599 }
4600
4601 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4602 {
4603         memset(rsv, 0, sizeof(*rsv));
4604         spin_lock_init(&rsv->lock);
4605         rsv->type = type;
4606 }
4607
4608 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4609                                               unsigned short type)
4610 {
4611         struct btrfs_block_rsv *block_rsv;
4612         struct btrfs_fs_info *fs_info = root->fs_info;
4613
4614         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4615         if (!block_rsv)
4616                 return NULL;
4617
4618         btrfs_init_block_rsv(block_rsv, type);
4619         block_rsv->space_info = __find_space_info(fs_info,
4620                                                   BTRFS_BLOCK_GROUP_METADATA);
4621         return block_rsv;
4622 }
4623
4624 void btrfs_free_block_rsv(struct btrfs_root *root,
4625                           struct btrfs_block_rsv *rsv)
4626 {
4627         if (!rsv)
4628                 return;
4629         btrfs_block_rsv_release(root, rsv, (u64)-1);
4630         kfree(rsv);
4631 }
4632
4633 int btrfs_block_rsv_add(struct btrfs_root *root,
4634                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4635                         enum btrfs_reserve_flush_enum flush)
4636 {
4637         int ret;
4638
4639         if (num_bytes == 0)
4640                 return 0;
4641
4642         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4643         if (!ret) {
4644                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
4645                 return 0;
4646         }
4647
4648         return ret;
4649 }
4650
4651 int btrfs_block_rsv_check(struct btrfs_root *root,
4652                           struct btrfs_block_rsv *block_rsv, int min_factor)
4653 {
4654         u64 num_bytes = 0;
4655         int ret = -ENOSPC;
4656
4657         if (!block_rsv)
4658                 return 0;
4659
4660         spin_lock(&block_rsv->lock);
4661         num_bytes = div_factor(block_rsv->size, min_factor);
4662         if (block_rsv->reserved >= num_bytes)
4663                 ret = 0;
4664         spin_unlock(&block_rsv->lock);
4665
4666         return ret;
4667 }
4668
4669 int btrfs_block_rsv_refill(struct btrfs_root *root,
4670                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
4671                            enum btrfs_reserve_flush_enum flush)
4672 {
4673         u64 num_bytes = 0;
4674         int ret = -ENOSPC;
4675
4676         if (!block_rsv)
4677                 return 0;
4678
4679         spin_lock(&block_rsv->lock);
4680         num_bytes = min_reserved;
4681         if (block_rsv->reserved >= num_bytes)
4682                 ret = 0;
4683         else
4684                 num_bytes -= block_rsv->reserved;
4685         spin_unlock(&block_rsv->lock);
4686
4687         if (!ret)
4688                 return 0;
4689
4690         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4691         if (!ret) {
4692                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4693                 return 0;
4694         }
4695
4696         return ret;
4697 }
4698
4699 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4700                             struct btrfs_block_rsv *dst_rsv,
4701                             u64 num_bytes)
4702 {
4703         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4704 }
4705
4706 void btrfs_block_rsv_release(struct btrfs_root *root,
4707                              struct btrfs_block_rsv *block_rsv,
4708                              u64 num_bytes)
4709 {
4710         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4711         if (global_rsv == block_rsv ||
4712             block_rsv->space_info != global_rsv->space_info)
4713                 global_rsv = NULL;
4714         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4715                                 num_bytes);
4716 }
4717
4718 /*
4719  * helper to calculate size of global block reservation.
4720  * the desired value is sum of space used by extent tree,
4721  * checksum tree and root tree
4722  */
4723 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4724 {
4725         struct btrfs_space_info *sinfo;
4726         u64 num_bytes;
4727         u64 meta_used;
4728         u64 data_used;
4729         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4730
4731         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4732         spin_lock(&sinfo->lock);
4733         data_used = sinfo->bytes_used;
4734         spin_unlock(&sinfo->lock);
4735
4736         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4737         spin_lock(&sinfo->lock);
4738         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4739                 data_used = 0;
4740         meta_used = sinfo->bytes_used;
4741         spin_unlock(&sinfo->lock);
4742
4743         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4744                     csum_size * 2;
4745         num_bytes += div64_u64(data_used + meta_used, 50);
4746
4747         if (num_bytes * 3 > meta_used)
4748                 num_bytes = div64_u64(meta_used, 3);
4749
4750         return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4751 }
4752
4753 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4754 {
4755         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4756         struct btrfs_space_info *sinfo = block_rsv->space_info;
4757         u64 num_bytes;
4758
4759         num_bytes = calc_global_metadata_size(fs_info);
4760
4761         spin_lock(&sinfo->lock);
4762         spin_lock(&block_rsv->lock);
4763
4764         block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
4765
4766         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4767                     sinfo->bytes_reserved + sinfo->bytes_readonly +
4768                     sinfo->bytes_may_use;
4769
4770         if (sinfo->total_bytes > num_bytes) {
4771                 num_bytes = sinfo->total_bytes - num_bytes;
4772                 block_rsv->reserved += num_bytes;
4773                 sinfo->bytes_may_use += num_bytes;
4774                 trace_btrfs_space_reservation(fs_info, "space_info",
4775                                       sinfo->flags, num_bytes, 1);
4776         }
4777
4778         if (block_rsv->reserved >= block_rsv->size) {
4779                 num_bytes = block_rsv->reserved - block_rsv->size;
4780                 sinfo->bytes_may_use -= num_bytes;
4781                 trace_btrfs_space_reservation(fs_info, "space_info",
4782                                       sinfo->flags, num_bytes, 0);
4783                 block_rsv->reserved = block_rsv->size;
4784                 block_rsv->full = 1;
4785         }
4786
4787         spin_unlock(&block_rsv->lock);
4788         spin_unlock(&sinfo->lock);
4789 }
4790
4791 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4792 {
4793         struct btrfs_space_info *space_info;
4794
4795         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4796         fs_info->chunk_block_rsv.space_info = space_info;
4797
4798         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4799         fs_info->global_block_rsv.space_info = space_info;
4800         fs_info->delalloc_block_rsv.space_info = space_info;
4801         fs_info->trans_block_rsv.space_info = space_info;
4802         fs_info->empty_block_rsv.space_info = space_info;
4803         fs_info->delayed_block_rsv.space_info = space_info;
4804
4805         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4806         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4807         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4808         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4809         if (fs_info->quota_root)
4810                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
4811         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4812
4813         update_global_block_rsv(fs_info);
4814 }
4815
4816 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4817 {
4818         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4819                                 (u64)-1);
4820         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4821         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4822         WARN_ON(fs_info->trans_block_rsv.size > 0);
4823         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4824         WARN_ON(fs_info->chunk_block_rsv.size > 0);
4825         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4826         WARN_ON(fs_info->delayed_block_rsv.size > 0);
4827         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4828 }
4829
4830 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4831                                   struct btrfs_root *root)
4832 {
4833         if (!trans->block_rsv)
4834                 return;
4835
4836         if (!trans->bytes_reserved)
4837                 return;
4838
4839         trace_btrfs_space_reservation(root->fs_info, "transaction",
4840                                       trans->transid, trans->bytes_reserved, 0);
4841         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4842         trans->bytes_reserved = 0;
4843 }
4844
4845 /* Can only return 0 or -ENOSPC */
4846 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4847                                   struct inode *inode)
4848 {
4849         struct btrfs_root *root = BTRFS_I(inode)->root;
4850         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4851         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4852
4853         /*
4854          * We need to hold space in order to delete our orphan item once we've
4855          * added it, so this takes the reservation so we can release it later
4856          * when we are truly done with the orphan item.
4857          */
4858         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4859         trace_btrfs_space_reservation(root->fs_info, "orphan",
4860                                       btrfs_ino(inode), num_bytes, 1);
4861         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4862 }
4863
4864 void btrfs_orphan_release_metadata(struct inode *inode)
4865 {
4866         struct btrfs_root *root = BTRFS_I(inode)->root;
4867         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4868         trace_btrfs_space_reservation(root->fs_info, "orphan",
4869                                       btrfs_ino(inode), num_bytes, 0);
4870         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4871 }
4872
4873 /*
4874  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
4875  * root: the root of the parent directory
4876  * rsv: block reservation
4877  * items: the number of items that we need to reserve space for
4878  * qgroup_reserved: used to return the reserved size in qgroup
4879  *
4880  * This function is used to reserve the space for snapshot/subvolume
4881  * creation and deletion. Those operations differ from the common
4882  * file/directory operations: they change two fs/file trees as well
4883  * as the root tree, and the number of items that the qgroup reserves
4884  * differs from the free space reservation. So we can not use the
4885  * space reservation mechanism in start_transaction().
4886  */
4887 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
4888                                      struct btrfs_block_rsv *rsv,
4889                                      int items,
4890                                      u64 *qgroup_reserved,
4891                                      bool use_global_rsv)
4892 {
4893         u64 num_bytes;
4894         int ret;
4895         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4896
4897         if (root->fs_info->quota_enabled) {
4898                 /* One for parent inode, two for dir entries */
4899                 num_bytes = 3 * root->leafsize;
4900                 ret = btrfs_qgroup_reserve(root, num_bytes);
4901                 if (ret)
4902                         return ret;
4903         } else {
4904                 num_bytes = 0;
4905         }
4906
4907         *qgroup_reserved = num_bytes;
4908
4909         num_bytes = btrfs_calc_trans_metadata_size(root, items);
4910         rsv->space_info = __find_space_info(root->fs_info,
4911                                             BTRFS_BLOCK_GROUP_METADATA);
4912         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
4913                                   BTRFS_RESERVE_FLUSH_ALL);
4914
4915         if (ret == -ENOSPC && use_global_rsv)
4916                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
4917
4918         if (ret) {
4919                 if (*qgroup_reserved)
4920                         btrfs_qgroup_free(root, *qgroup_reserved);
4921         }
4922
4923         return ret;
4924 }
4925
4926 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
4927                                       struct btrfs_block_rsv *rsv,
4928                                       u64 qgroup_reserved)
4929 {
4930         btrfs_block_rsv_release(root, rsv, (u64)-1);
4931         if (qgroup_reserved)
4932                 btrfs_qgroup_free(root, qgroup_reserved);
4933 }
4934
4935 /**
4936  * drop_outstanding_extent - drop an outstanding extent
4937  * @inode: the inode we're dropping the extent for
4938  *
4939  * This is called when we are freeing up an outstanding extent, either after
4940  * an error or after an extent is written.  This will return the number of
4941  * reserved extents that need to be freed.  This must be called with
4942  * BTRFS_I(inode)->lock held.
4943  */
4944 static unsigned drop_outstanding_extent(struct inode *inode)
4945 {
4946         unsigned drop_inode_space = 0;
4947         unsigned dropped_extents = 0;
4948
4949         BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4950         BTRFS_I(inode)->outstanding_extents--;
4951
4952         if (BTRFS_I(inode)->outstanding_extents == 0 &&
4953             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4954                                &BTRFS_I(inode)->runtime_flags))
4955                 drop_inode_space = 1;
4956
4957         /*
4958          * If we have at least as many outstanding extents as we have
4959          * reserved then we need to leave the reserved extents count alone.
4960          */
4961         if (BTRFS_I(inode)->outstanding_extents >=
4962             BTRFS_I(inode)->reserved_extents)
4963                 return drop_inode_space;
4964
4965         dropped_extents = BTRFS_I(inode)->reserved_extents -
4966                 BTRFS_I(inode)->outstanding_extents;
4967         BTRFS_I(inode)->reserved_extents -= dropped_extents;
4968         return dropped_extents + drop_inode_space;
4969 }
4970
4971 /**
4972  * calc_csum_metadata_size - return the amount of metadata space that must be
4973  *      reserved/freed for the given bytes.
4974  * @inode: the inode we're manipulating
4975  * @num_bytes: the number of bytes in question
4976  * @reserve: 1 if we are reserving space, 0 if we are freeing space
4977  *
4978  * This adjusts the number of csum_bytes in the inode and then returns the
4979  * correct amount of metadata that must either be reserved or freed.  We
4980  * calculate how many checksums we can fit into one leaf and then divide the
4981  * number of bytes that will need to be checksummed by this value to figure out
4982  * how many checksums will be required.  If we are adding bytes then the number
4983  * may go up and we will return the number of additional bytes that must be
4984  * reserved.  If it is going down we will return the number of bytes that must
4985  * be freed.
4986  *
4987  * This must be called with BTRFS_I(inode)->lock held.
4988  */
4989 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4990                                    int reserve)
4991 {
4992         struct btrfs_root *root = BTRFS_I(inode)->root;
4993         u64 csum_size;
4994         int num_csums_per_leaf;
4995         int num_csums;
4996         int old_csums;
4997
4998         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4999             BTRFS_I(inode)->csum_bytes == 0)
5000                 return 0;
5001
5002         old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
5003         if (reserve)
5004                 BTRFS_I(inode)->csum_bytes += num_bytes;
5005         else
5006                 BTRFS_I(inode)->csum_bytes -= num_bytes;
5007         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
5008         num_csums_per_leaf = (int)div64_u64(csum_size,
5009                                             sizeof(struct btrfs_csum_item) +
5010                                             sizeof(struct btrfs_disk_key));
5011         num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
5012         num_csums = num_csums + num_csums_per_leaf - 1;
5013         num_csums = num_csums / num_csums_per_leaf;
5014
5015         old_csums = old_csums + num_csums_per_leaf - 1;
5016         old_csums = old_csums / num_csums_per_leaf;
5017
5018         /* No change, no need to reserve more */
5019         if (old_csums == num_csums)
5020                 return 0;
5021
5022         if (reserve)
5023                 return btrfs_calc_trans_metadata_size(root,
5024                                                       num_csums - old_csums);
5025
5026         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
5027 }
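/*
 * Worked example (illustrative numbers only): with a 4K sectorsize and
 * room for, say, 1000 csum items per leaf, growing csum_bytes from 0 to
 * 8M means 2048 checksums, i.e. 3 leaves instead of 0, so we must reserve
 * btrfs_calc_trans_metadata_size(root, 3) more bytes.
 */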
5028
5029 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5030 {
5031         struct btrfs_root *root = BTRFS_I(inode)->root;
5032         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
5033         u64 to_reserve = 0;
5034         u64 csum_bytes;
5035         unsigned nr_extents = 0;
5036         int extra_reserve = 0;
5037         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5038         int ret = 0;
5039         bool delalloc_lock = true;
5040         u64 to_free = 0;
5041         unsigned dropped;
5042
5043         /* If we are a free space inode we need to not flush since we will be in
5044          * the middle of a transaction commit.  We also don't need the delalloc
5045          * mutex since we won't race with anybody.  We need this mostly to make
5046          * lockdep shut its filthy mouth.
5047          */
5048         if (btrfs_is_free_space_inode(inode)) {
5049                 flush = BTRFS_RESERVE_NO_FLUSH;
5050                 delalloc_lock = false;
5051         }
5052
5053         if (flush != BTRFS_RESERVE_NO_FLUSH &&
5054             btrfs_transaction_in_commit(root->fs_info))
5055                 schedule_timeout(1);
5056
5057         if (delalloc_lock)
5058                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5059
5060         num_bytes = ALIGN(num_bytes, root->sectorsize);
5061
5062         spin_lock(&BTRFS_I(inode)->lock);
5063         BTRFS_I(inode)->outstanding_extents++;
5064
5065         if (BTRFS_I(inode)->outstanding_extents >
5066             BTRFS_I(inode)->reserved_extents)
5067                 nr_extents = BTRFS_I(inode)->outstanding_extents -
5068                         BTRFS_I(inode)->reserved_extents;
5069
5070         /*
5071          * Add an item to reserve for updating the inode when we complete the
5072          * delalloc io.
5073          */
5074         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5075                       &BTRFS_I(inode)->runtime_flags)) {
5076                 nr_extents++;
5077                 extra_reserve = 1;
5078         }
5079
5080         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
5081         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5082         csum_bytes = BTRFS_I(inode)->csum_bytes;
5083         spin_unlock(&BTRFS_I(inode)->lock);
5084
5085         if (root->fs_info->quota_enabled) {
5086                 ret = btrfs_qgroup_reserve(root, num_bytes +
5087                                            nr_extents * root->leafsize);
5088                 if (ret)
5089                         goto out_fail;
5090         }
5091
5092         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
5093         if (unlikely(ret)) {
5094                 if (root->fs_info->quota_enabled)
5095                         btrfs_qgroup_free(root, num_bytes +
5096                                                 nr_extents * root->leafsize);
5097                 goto out_fail;
5098         }
5099
5100         spin_lock(&BTRFS_I(inode)->lock);
5101         if (extra_reserve) {
5102                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5103                         &BTRFS_I(inode)->runtime_flags);
5104                 nr_extents--;
5105         }
5106         BTRFS_I(inode)->reserved_extents += nr_extents;
5107         spin_unlock(&BTRFS_I(inode)->lock);
5108
5109         if (delalloc_lock)
5110                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5111
5112         if (to_reserve)
5113                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5114                                               btrfs_ino(inode), to_reserve, 1);
5115         block_rsv_add_bytes(block_rsv, to_reserve, 1);
5116
5117         return 0;
5118
5119 out_fail:
5120         spin_lock(&BTRFS_I(inode)->lock);
5121         dropped = drop_outstanding_extent(inode);
5122         /*
5123          * If the inode's csum_bytes is the same as the original
5124          * csum_bytes then we know we haven't raced with any free()ers
5125          * so we can just reduce our inode's csum bytes and carry on.
5126          */
5127         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5128                 calc_csum_metadata_size(inode, num_bytes, 0);
5129         } else {
5130                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5131                 u64 bytes;
5132
5133                 /*
5134                  * This is tricky, but first we need to figure out how much was
5135                  * freed by any free-ers that ran during this
5136                  * reservation, so we reset ->csum_bytes to the csum_bytes
5137                  * before we dropped our lock, and then call the free for the
5138                  * number of bytes that were freed while we were trying our
5139                  * reservation.
5140                  */
5141                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5142                 BTRFS_I(inode)->csum_bytes = csum_bytes;
5143                 to_free = calc_csum_metadata_size(inode, bytes, 0);
5144
5146                 /*
5147                  * Now we need to see how much we would have freed had we not
5148                  * been making this reservation and our ->csum_bytes were not
5149                  * artificially inflated.
5150                  */
5151                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5152                 bytes = csum_bytes - orig_csum_bytes;
5153                 bytes = calc_csum_metadata_size(inode, bytes, 0);
5154
5155                 /*
5156                  * Now reset ->csum_bytes to what it should be.  If bytes is
5157                  * more than to_free then we would have free'd more space had we
5158                  * not had an artificially high ->csum_bytes, so we need to free
5159                  * the remainder.  If bytes is the same or less then we don't
5160                  * need to do anything, the other free-ers did the correct
5161                  * thing.
5162                  */
5163                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5164                 if (bytes > to_free)
5165                         to_free = bytes - to_free;
5166                 else
5167                         to_free = 0;
5168         }
5169         spin_unlock(&BTRFS_I(inode)->lock);
5170         if (dropped)
5171                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5172
5173         if (to_free) {
5174                 btrfs_block_rsv_release(root, block_rsv, to_free);
5175                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5176                                               btrfs_ino(inode), to_free, 0);
5177         }
5178         if (delalloc_lock)
5179                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5180         return ret;
5181 }
5182
5183 /**
5184  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5185  * @inode: the inode to release the reservation for
5186  * @num_bytes: the number of bytes we're releasing
5187  *
5188  * This will release the metadata reservation for an inode.  This can be called
5189  * once we complete IO for a given set of bytes to release their metadata
5190  * reservations.
5191  */
5192 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5193 {
5194         struct btrfs_root *root = BTRFS_I(inode)->root;
5195         u64 to_free = 0;
5196         unsigned dropped;
5197
5198         num_bytes = ALIGN(num_bytes, root->sectorsize);
5199         spin_lock(&BTRFS_I(inode)->lock);
5200         dropped = drop_outstanding_extent(inode);
5201
5202         if (num_bytes)
5203                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5204         spin_unlock(&BTRFS_I(inode)->lock);
5205         if (dropped > 0)
5206                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5207
5208         trace_btrfs_space_reservation(root->fs_info, "delalloc",
5209                                       btrfs_ino(inode), to_free, 0);
5210         if (root->fs_info->quota_enabled) {
5211                 btrfs_qgroup_free(root, num_bytes +
5212                                         dropped * root->leafsize);
5213         }
5214
5215         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5216                                 to_free);
5217 }
5218
5219 /**
5220  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
5221  * @inode: inode we're writing to
5222  * @num_bytes: the number of bytes we want to allocate
5223  *
5224  * This will do the following things
5225  *
5226  * o reserve space in the data space info for num_bytes
5227  * o reserve space in the metadata space info based on number of outstanding
5228  *   extents and how much csums will be needed
5229  * o add to the inode's ->delalloc_bytes
5230  * o add it to the fs_info's delalloc inodes list.
5231  *
5232  * This will return 0 for success and -ENOSPC if there is no space left.
5233  */
5234 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5235 {
5236         int ret;
5237
5238         ret = btrfs_check_data_free_space(inode, num_bytes);
5239         if (ret)
5240                 return ret;
5241
5242         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5243         if (ret) {
5244                 btrfs_free_reserved_data_space(inode, num_bytes);
5245                 return ret;
5246         }
5247
5248         return 0;
5249 }
5250
5251 /**
5252  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5253  * @inode: inode we're releasing space for
5254  * @num_bytes: the number of bytes we want to free up
5255  *
5256  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5257  * called in the case that we don't need the metadata AND data reservations
5258  * anymore, e.g. when there is an error or when we insert an inline extent.
5259  *
5260  * This function will release the metadata space that was not used and will
5261  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5262  * list if there are no delalloc bytes left.
5263  */
5264 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5265 {
5266         btrfs_delalloc_release_metadata(inode, num_bytes);
5267         btrfs_free_reserved_data_space(inode, num_bytes);
5268 }
5269
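/*
 * Propagate an allocation or free of num_bytes at bytenr into the super
 * block byte count, the affected block group item(s) and their space_info
 * counters.  Frees are accounted as pinned here; the space is handed back
 * to the block group when the pinned extents are unpinned at transaction
 * commit.
 */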
5270 static int update_block_group(struct btrfs_root *root,
5271                               u64 bytenr, u64 num_bytes, int alloc)
5272 {
5273         struct btrfs_block_group_cache *cache = NULL;
5274         struct btrfs_fs_info *info = root->fs_info;
5275         u64 total = num_bytes;
5276         u64 old_val;
5277         u64 byte_in_group;
5278         int factor;
5279
5280         /* block accounting for super block */
5281         spin_lock(&info->delalloc_root_lock);
5282         old_val = btrfs_super_bytes_used(info->super_copy);
5283         if (alloc)
5284                 old_val += num_bytes;
5285         else
5286                 old_val -= num_bytes;
5287         btrfs_set_super_bytes_used(info->super_copy, old_val);
5288         spin_unlock(&info->delalloc_root_lock);
5289
5290         while (total) {
5291                 cache = btrfs_lookup_block_group(info, bytenr);
5292                 if (!cache)
5293                         return -ENOENT;
5294                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5295                                     BTRFS_BLOCK_GROUP_RAID1 |
5296                                     BTRFS_BLOCK_GROUP_RAID10))
5297                         factor = 2;
5298                 else
5299                         factor = 1;
5300                 /*
5301                  * If this block group has free space cache written out, we
5302                  * need to make sure to load it if we are removing space.  This
5303                  * is because we need the unpinning stage to actually add the
5304                  * space back to the block group, otherwise we will leak space.
5305                  */
5306                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5307                         cache_block_group(cache, 1);
5308
5309                 byte_in_group = bytenr - cache->key.objectid;
5310                 WARN_ON(byte_in_group > cache->key.offset);
5311
5312                 spin_lock(&cache->space_info->lock);
5313                 spin_lock(&cache->lock);
5314
5315                 if (btrfs_test_opt(root, SPACE_CACHE) &&
5316                     cache->disk_cache_state < BTRFS_DC_CLEAR)
5317                         cache->disk_cache_state = BTRFS_DC_CLEAR;
5318
5319                 cache->dirty = 1;
5320                 old_val = btrfs_block_group_used(&cache->item);
5321                 num_bytes = min(total, cache->key.offset - byte_in_group);
5322                 if (alloc) {
5323                         old_val += num_bytes;
5324                         btrfs_set_block_group_used(&cache->item, old_val);
5325                         cache->reserved -= num_bytes;
5326                         cache->space_info->bytes_reserved -= num_bytes;
5327                         cache->space_info->bytes_used += num_bytes;
5328                         cache->space_info->disk_used += num_bytes * factor;
5329                         spin_unlock(&cache->lock);
5330                         spin_unlock(&cache->space_info->lock);
5331                 } else {
5332                         old_val -= num_bytes;
5333                         btrfs_set_block_group_used(&cache->item, old_val);
5334                         cache->pinned += num_bytes;
5335                         cache->space_info->bytes_pinned += num_bytes;
5336                         cache->space_info->bytes_used -= num_bytes;
5337                         cache->space_info->disk_used -= num_bytes * factor;
5338                         spin_unlock(&cache->lock);
5339                         spin_unlock(&cache->space_info->lock);
5340
5341                         set_extent_dirty(info->pinned_extents,
5342                                          bytenr, bytenr + num_bytes - 1,
5343                                          GFP_NOFS | __GFP_NOFAIL);
5344                 }
5345                 btrfs_put_block_group(cache);
5346                 total -= num_bytes;
5347                 bytenr += num_bytes;
5348         }
5349         return 0;
5350 }
5351
5352 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5353 {
5354         struct btrfs_block_group_cache *cache;
5355         u64 bytenr;
5356
5357         spin_lock(&root->fs_info->block_group_cache_lock);
5358         bytenr = root->fs_info->first_logical_byte;
5359         spin_unlock(&root->fs_info->block_group_cache_lock);
5360
5361         if (bytenr < (u64)-1)
5362                 return bytenr;
5363
5364         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5365         if (!cache)
5366                 return 0;
5367
5368         bytenr = cache->key.objectid;
5369         btrfs_put_block_group(cache);
5370
5371         return bytenr;
5372 }
5373
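/*
 * Account num_bytes at bytenr as pinned in both the block group and its
 * space_info, optionally moving them out of ->reserved, and mark the
 * range dirty in pinned_extents so it is unpinned at commit time.
 */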
5374 static int pin_down_extent(struct btrfs_root *root,
5375                            struct btrfs_block_group_cache *cache,
5376                            u64 bytenr, u64 num_bytes, int reserved)
5377 {
5378         spin_lock(&cache->space_info->lock);
5379         spin_lock(&cache->lock);
5380         cache->pinned += num_bytes;
5381         cache->space_info->bytes_pinned += num_bytes;
5382         if (reserved) {
5383                 cache->reserved -= num_bytes;
5384                 cache->space_info->bytes_reserved -= num_bytes;
5385         }
5386         spin_unlock(&cache->lock);
5387         spin_unlock(&cache->space_info->lock);
5388
5389         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5390                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5391         if (reserved)
5392                 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
5393         return 0;
5394 }
5395
5396 /*
5397  * this function must be called within a transaction
5398  */
5399 int btrfs_pin_extent(struct btrfs_root *root,
5400                      u64 bytenr, u64 num_bytes, int reserved)
5401 {
5402         struct btrfs_block_group_cache *cache;
5403
5404         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5405         BUG_ON(!cache); /* Logic error */
5406
5407         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5408
5409         btrfs_put_block_group(cache);
5410         return 0;
5411 }
5412
5413 /*
5414  * this function must be called within a transaction
5415  */
5416 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5417                                     u64 bytenr, u64 num_bytes)
5418 {
5419         struct btrfs_block_group_cache *cache;
5420         int ret;
5421
5422         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5423         if (!cache)
5424                 return -EINVAL;
5425
5426         /*
5427          * pull in the free space cache (if any) so that our pin
5428          * removes the free space from the cache.  We pass load_only == 1
5429          * because the slow code that reads in the free extents does check
5430          * the pinned extents.
5431          */
5432         cache_block_group(cache, 1);
5433
5434         pin_down_extent(root, cache, bytenr, num_bytes, 0);
5435
5436         /* remove us from the free space cache (if we're there at all) */
5437         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5438         btrfs_put_block_group(cache);
5439         return ret;
5440 }
5441
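/*
 * Make sure a logged extent cannot be handed out by the allocator:
 * remove it from the free space cache if caching has already passed it,
 * or mark it excluded if the caching thread has not reached it yet.  A
 * range that straddles caching_ctl->progress is split between the two
 * cases.
 */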
5442 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
5443 {
5444         int ret;
5445         struct btrfs_block_group_cache *block_group;
5446         struct btrfs_caching_control *caching_ctl;
5447
5448         block_group = btrfs_lookup_block_group(root->fs_info, start);
5449         if (!block_group)
5450                 return -EINVAL;
5451
5452         cache_block_group(block_group, 0);
5453         caching_ctl = get_caching_control(block_group);
5454
5455         if (!caching_ctl) {
5456                 /* Logic error */
5457                 BUG_ON(!block_group_cache_done(block_group));
5458                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5459         } else {
5460                 mutex_lock(&caching_ctl->mutex);
5461
5462                 if (start >= caching_ctl->progress) {
5463                         ret = add_excluded_extent(root, start, num_bytes);
5464                 } else if (start + num_bytes <= caching_ctl->progress) {
5465                         ret = btrfs_remove_free_space(block_group,
5466                                                       start, num_bytes);
5467                 } else {
5468                         num_bytes = caching_ctl->progress - start;
5469                         ret = btrfs_remove_free_space(block_group,
5470                                                       start, num_bytes);
5471                         if (ret)
5472                                 goto out_lock;
5473
5474                         num_bytes = (start + num_bytes) -
5475                                 caching_ctl->progress;
5476                         start = caching_ctl->progress;
5477                         ret = add_excluded_extent(root, start, num_bytes);
5478                 }
5479 out_lock:
5480                 mutex_unlock(&caching_ctl->mutex);
5481                 put_caching_control(caching_ctl);
5482         }
5483         btrfs_put_block_group(block_group);
5484         return ret;
5485 }
5486
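/*
 * Walk a leaf from the log tree and exclude the on-disk extents of every
 * real (non-inline, non-hole) file extent item in it.  This is only
 * needed on filesystems with mixed block groups, where logged data
 * extents can share block groups with the metadata being replayed.
 */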
5487 int btrfs_exclude_logged_extents(struct btrfs_root *log,
5488                                  struct extent_buffer *eb)
5489 {
5490         struct btrfs_file_extent_item *item;
5491         struct btrfs_key key;
5492         int found_type;
5493         int i;
5494
5495         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
5496                 return 0;
5497
5498         for (i = 0; i < btrfs_header_nritems(eb); i++) {
5499                 btrfs_item_key_to_cpu(eb, &key, i);
5500                 if (key.type != BTRFS_EXTENT_DATA_KEY)
5501                         continue;
5502                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
5503                 found_type = btrfs_file_extent_type(eb, item);
5504                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
5505                         continue;
5506                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
5507                         continue;
5508                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
5509                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
5510                 __exclude_logged_extent(log, key.objectid, key.offset);
5511         }
5512
5513         return 0;
5514 }
5515
5516 /**
5517  * btrfs_update_reserved_bytes - update the block_group and space info counters
5518  * @cache:      The cache we are manipulating
5519  * @num_bytes:  The number of bytes in question
5520  * @reserve:    One of the reservation enums
5521  *
5522  * This is called by the allocator when it reserves space, or by somebody who is
5523  * freeing space that was never actually used on disk.  For example if you
5524  * reserve some space for a new leaf in transaction A and before transaction A
5525  * commits you free that leaf, you call this with reserve set to 0 in order to
5526  * clear the reservation.
5527  *
5528  * Metadata reservations should be made with RESERVE_ALLOC so we do the proper
5529  * ENOSPC accounting.  For data we handle the reservation through clearing the
5530  * delalloc bits in the io_tree.  We have to do this since we could end up
5531  * allocating less disk space for the amount of data we have reserved in the
5532  * case of compression.
5533  *
5534  * If this is a reservation and the block group has become read only we cannot
5535  * make the reservation and return -EAGAIN, otherwise this function always
5536  * succeeds.
5537  */
5538 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5539                                        u64 num_bytes, int reserve)
5540 {
5541         struct btrfs_space_info *space_info = cache->space_info;
5542         int ret = 0;
5543
5544         spin_lock(&space_info->lock);
5545         spin_lock(&cache->lock);
5546         if (reserve != RESERVE_FREE) {
5547                 if (cache->ro) {
5548                         ret = -EAGAIN;
5549                 } else {
5550                         cache->reserved += num_bytes;
5551                         space_info->bytes_reserved += num_bytes;
5552                         if (reserve == RESERVE_ALLOC) {
5553                                 trace_btrfs_space_reservation(cache->fs_info,
5554                                                 "space_info", space_info->flags,
5555                                                 num_bytes, 0);
5556                                 space_info->bytes_may_use -= num_bytes;
5557                         }
5558                 }
5559         } else {
5560                 if (cache->ro)
5561                         space_info->bytes_readonly += num_bytes;
5562                 cache->reserved -= num_bytes;
5563                 space_info->bytes_reserved -= num_bytes;
5564         }
5565         spin_unlock(&cache->lock);
5566         spin_unlock(&space_info->lock);
5567         return ret;
5568 }
5569
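/*
 * Called during the transaction commit to prepare for unpinning: record
 * how far each caching block group has progressed (last_byte_to_unpin)
 * and swap which freed_extents tree collects newly pinned extents, so
 * that the other tree can be safely unpinned after the commit.
 */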
5570 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5571                                 struct btrfs_root *root)
5572 {
5573         struct btrfs_fs_info *fs_info = root->fs_info;
5574         struct btrfs_caching_control *next;
5575         struct btrfs_caching_control *caching_ctl;
5576         struct btrfs_block_group_cache *cache;
5577         struct btrfs_space_info *space_info;
5578
5579         down_write(&fs_info->commit_root_sem);
5580
5581         list_for_each_entry_safe(caching_ctl, next,
5582                                  &fs_info->caching_block_groups, list) {
5583                 cache = caching_ctl->block_group;
5584                 if (block_group_cache_done(cache)) {
5585                         cache->last_byte_to_unpin = (u64)-1;
5586                         list_del_init(&caching_ctl->list);
5587                         put_caching_control(caching_ctl);
5588                 } else {
5589                         cache->last_byte_to_unpin = caching_ctl->progress;
5590                 }
5591         }
5592
5593         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5594                 fs_info->pinned_extents = &fs_info->freed_extents[1];
5595         else
5596                 fs_info->pinned_extents = &fs_info->freed_extents[0];
5597
5598         up_write(&fs_info->commit_root_sem);
5599
5600         list_for_each_entry_rcu(space_info, &fs_info->space_info, list)
5601                 percpu_counter_set(&space_info->total_bytes_pinned, 0);
5602
5603         update_global_block_rsv(fs_info);
5604 }
5605
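/*
 * Return the byte range [start, end] to the free space caches, crossing
 * block group boundaries as needed.  Only the part of each group below
 * last_byte_to_unpin is re-added to its free space cache.  Pinned
 * counters are dropped, read-only groups credit bytes_readonly instead,
 * and freed space may be used to top up the global block reserve.
 */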
5606 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
5607 {
5608         struct btrfs_fs_info *fs_info = root->fs_info;
5609         struct btrfs_block_group_cache *cache = NULL;
5610         struct btrfs_space_info *space_info;
5611         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5612         u64 len;
5613         bool readonly;
5614
5615         while (start <= end) {
5616                 readonly = false;
5617                 if (!cache ||
5618                     start >= cache->key.objectid + cache->key.offset) {
5619                         if (cache)
5620                                 btrfs_put_block_group(cache);
5621                         cache = btrfs_lookup_block_group(fs_info, start);
5622                         BUG_ON(!cache); /* Logic error */
5623                 }
5624
5625                 len = cache->key.objectid + cache->key.offset - start;
5626                 len = min(len, end + 1 - start);
5627
5628                 if (start < cache->last_byte_to_unpin) {
5629                         len = min(len, cache->last_byte_to_unpin - start);
5630                         btrfs_add_free_space(cache, start, len);
5631                 }
5632
5633                 start += len;
5634                 space_info = cache->space_info;
5635
5636                 spin_lock(&space_info->lock);
5637                 spin_lock(&cache->lock);
5638                 cache->pinned -= len;
5639                 space_info->bytes_pinned -= len;
5640                 if (cache->ro) {
5641                         space_info->bytes_readonly += len;
5642                         readonly = true;
5643                 }
5644                 spin_unlock(&cache->lock);
5645                 if (!readonly && global_rsv->space_info == space_info) {
5646                         spin_lock(&global_rsv->lock);
5647                         if (!global_rsv->full) {
5648                                 len = min(len, global_rsv->size -
5649                                           global_rsv->reserved);
5650                                 global_rsv->reserved += len;
5651                                 space_info->bytes_may_use += len;
5652                                 if (global_rsv->reserved >= global_rsv->size)
5653                                         global_rsv->full = 1;
5654                         }
5655                         spin_unlock(&global_rsv->lock);
5656                 }
5657                 spin_unlock(&space_info->lock);
5658         }
5659
5660         if (cache)
5661                 btrfs_put_block_group(cache);
5662         return 0;
5663 }
5664
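/*
 * Called once the transaction commit is on disk: pick whichever
 * freed_extents tree is not currently accumulating pins and unpin every
 * dirty range in it, issuing discards first when the discard mount
 * option is enabled.
 */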
5665 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5666                                struct btrfs_root *root)
5667 {
5668         struct btrfs_fs_info *fs_info = root->fs_info;
5669         struct extent_io_tree *unpin;
5670         u64 start;
5671         u64 end;
5672         int ret;
5673
5674         if (trans->aborted)
5675                 return 0;
5676
5677         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5678                 unpin = &fs_info->freed_extents[1];
5679         else
5680                 unpin = &fs_info->freed_extents[0];
5681
5682         while (1) {
5683                 ret = find_first_extent_bit(unpin, 0, &start, &end,
5684                                             EXTENT_DIRTY, NULL);
5685                 if (ret)
5686                         break;
5687
5688                 if (btrfs_test_opt(root, DISCARD))
5689                         ret = btrfs_discard_extent(root, start,
5690                                                    end + 1 - start, NULL);
5691
5692                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
5693                 unpin_extent_range(root, start, end);
5694                 cond_resched();
5695         }
5696
5697         return 0;
5698 }
5699
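/*
 * Adjust the total_bytes_pinned counter of the space_info implied by
 * the owner and root objectid (system, metadata or data).  Callers pass
 * a negative num_bytes to subtract from the counter.
 */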
5700 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
5701                              u64 owner, u64 root_objectid)
5702 {
5703         struct btrfs_space_info *space_info;
5704         u64 flags;
5705
5706         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5707                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
5708                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
5709                 else
5710                         flags = BTRFS_BLOCK_GROUP_METADATA;
5711         } else {
5712                 flags = BTRFS_BLOCK_GROUP_DATA;
5713         }
5714
5715         space_info = __find_space_info(fs_info, flags);
5716         BUG_ON(!space_info); /* Logic bug */
5717         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
5718 }
5719
5720
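/*
 * Drop refs_to_drop references to the extent at bytenr.  The matching
 * backref is removed and, once the reference count reaches zero, the
 * extent item itself is deleted, its csums are dropped (for data) and
 * the block group accounting is updated via update_block_group().
 */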
5721 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5722                                 struct btrfs_root *root,
5723                                 u64 bytenr, u64 num_bytes, u64 parent,
5724                                 u64 root_objectid, u64 owner_objectid,
5725                                 u64 owner_offset, int refs_to_drop,
5726                                 struct btrfs_delayed_extent_op *extent_op)
5727 {
5728         struct btrfs_key key;
5729         struct btrfs_path *path;
5730         struct btrfs_fs_info *info = root->fs_info;
5731         struct btrfs_root *extent_root = info->extent_root;
5732         struct extent_buffer *leaf;
5733         struct btrfs_extent_item *ei;
5734         struct btrfs_extent_inline_ref *iref;
5735         int ret;
5736         int is_data;
5737         int extent_slot = 0;
5738         int found_extent = 0;
5739         int num_to_del = 1;
5740         u32 item_size;
5741         u64 refs;
5742         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
5743                                                  SKINNY_METADATA);
5744
5745         path = btrfs_alloc_path();
5746         if (!path)
5747                 return -ENOMEM;
5748
5749         path->reada = 1;
5750         path->leave_spinning = 1;
5751
5752         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5753         BUG_ON(!is_data && refs_to_drop != 1);
5754
5755         if (is_data)
5756                 skinny_metadata = 0;
5757
5758         ret = lookup_extent_backref(trans, extent_root, path, &iref,
5759                                     bytenr, num_bytes, parent,
5760                                     root_objectid, owner_objectid,
5761                                     owner_offset);
5762         if (ret == 0) {
5763                 extent_slot = path->slots[0];
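                /*
                 * The extent item should sit just before the backref we
                 * found, so walk backwards over at most a handful of
                 * slots looking for it.
                 */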
5764                 while (extent_slot >= 0) {
5765                         btrfs_item_key_to_cpu(path->nodes[0], &key,
5766                                               extent_slot);
5767                         if (key.objectid != bytenr)
5768                                 break;
5769                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5770                             key.offset == num_bytes) {
5771                                 found_extent = 1;
5772                                 break;
5773                         }
5774                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
5775                             key.offset == owner_objectid) {
5776                                 found_extent = 1;
5777                                 break;
5778                         }
5779                         if (path->slots[0] - extent_slot > 5)
5780                                 break;
5781                         extent_slot--;
5782                 }
5783 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5784                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5785                 if (found_extent && item_size < sizeof(*ei))
5786                         found_extent = 0;
5787 #endif
5788                 if (!found_extent) {
5789                         BUG_ON(iref);
5790                         ret = remove_extent_backref(trans, extent_root, path,
5791                                                     NULL, refs_to_drop,
5792                                                     is_data);
5793                         if (ret) {
5794                                 btrfs_abort_transaction(trans, extent_root, ret);
5795                                 goto out;
5796                         }
5797                         btrfs_release_path(path);
5798                         path->leave_spinning = 1;
5799
5800                         key.objectid = bytenr;
5801                         key.type = BTRFS_EXTENT_ITEM_KEY;
5802                         key.offset = num_bytes;
5803
5804                         if (!is_data && skinny_metadata) {
5805                                 key.type = BTRFS_METADATA_ITEM_KEY;
5806                                 key.offset = owner_objectid;
5807                         }
5808
5809                         ret = btrfs_search_slot(trans, extent_root,
5810                                                 &key, path, -1, 1);
5811                         if (ret > 0 && skinny_metadata && path->slots[0]) {
5812                                 /*
5813                                  * Couldn't find our skinny metadata item,
5814                                  * see if we have ye olde extent item.
5815                                  */
5816                                 path->slots[0]--;
5817                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
5818                                                       path->slots[0]);
5819                                 if (key.objectid == bytenr &&
5820                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
5821                                     key.offset == num_bytes)
5822                                         ret = 0;
5823                         }
5824
5825                         if (ret > 0 && skinny_metadata) {
5826                                 skinny_metadata = false;
5827                                 key.objectid = bytenr;
5828                                 key.type = BTRFS_EXTENT_ITEM_KEY;
5829                                 key.offset = num_bytes;
5830                                 btrfs_release_path(path);
5831                                 ret = btrfs_search_slot(trans, extent_root,
5832                                                         &key, path, -1, 1);
5833                         }
5834
5835                         if (ret) {
5836                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5837                                         ret, bytenr);
5838                                 if (ret > 0)
5839                                         btrfs_print_leaf(extent_root,
5840                                                          path->nodes[0]);
5841                         }
5842                         if (ret < 0) {
5843                                 btrfs_abort_transaction(trans, extent_root, ret);
5844                                 goto out;
5845                         }
5846                         extent_slot = path->slots[0];
5847                 }
5848         } else if (WARN_ON(ret == -ENOENT)) {
5849                 btrfs_print_leaf(extent_root, path->nodes[0]);
5850                 btrfs_err(info,
5851                         "unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
5852                         bytenr, parent, root_objectid, owner_objectid,
5853                         owner_offset);
5854                 btrfs_abort_transaction(trans, extent_root, ret);
5855                 goto out;
5856         } else {
5857                 btrfs_abort_transaction(trans, extent_root, ret);
5858                 goto out;
5859         }
5860
5861         leaf = path->nodes[0];
5862         item_size = btrfs_item_size_nr(leaf, extent_slot);
5863 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5864         if (item_size < sizeof(*ei)) {
5865                 BUG_ON(found_extent || extent_slot != path->slots[0]);
5866                 ret = convert_extent_item_v0(trans, extent_root, path,
5867                                              owner_objectid, 0);
5868                 if (ret < 0) {
5869                         btrfs_abort_transaction(trans, extent_root, ret);
5870                         goto out;
5871                 }
5872
5873                 btrfs_release_path(path);
5874                 path->leave_spinning = 1;
5875
5876                 key.objectid = bytenr;
5877                 key.type = BTRFS_EXTENT_ITEM_KEY;
5878                 key.offset = num_bytes;
5879
5880                 ret = btrfs_search_slot(trans, extent_root, &key, path,
5881                                         -1, 1);
5882                 if (ret) {
5883                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5884                                 ret, bytenr);
5885                         btrfs_print_leaf(extent_root, path->nodes[0]);
5886                 }
5887                 if (ret < 0) {
5888                         btrfs_abort_transaction(trans, extent_root, ret);
5889                         goto out;
5890                 }
5891
5892                 extent_slot = path->slots[0];
5893                 leaf = path->nodes[0];
5894                 item_size = btrfs_item_size_nr(leaf, extent_slot);
5895         }
5896 #endif
5897         BUG_ON(item_size < sizeof(*ei));
5898         ei = btrfs_item_ptr(leaf, extent_slot,
5899                             struct btrfs_extent_item);
5900         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
5901             key.type == BTRFS_EXTENT_ITEM_KEY) {
5902                 struct btrfs_tree_block_info *bi;
5903                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
5904                 bi = (struct btrfs_tree_block_info *)(ei + 1);
5905                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
5906         }
5907
5908         refs = btrfs_extent_refs(leaf, ei);
5909         if (refs < refs_to_drop) {
5910                 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
5911                           "for bytenr %Lu", refs_to_drop, refs, bytenr);
5912                 ret = -EINVAL;
5913                 btrfs_abort_transaction(trans, extent_root, ret);
5914                 goto out;
5915         }
5916         refs -= refs_to_drop;
5917
5918         if (refs > 0) {
5919                 if (extent_op)
5920                         __run_delayed_extent_op(extent_op, leaf, ei);
5921                 /*
5922                  * In the case of inline back ref, reference count will
5923                  * be updated by remove_extent_backref
5924                  */
5925                 if (iref) {
5926                         BUG_ON(!found_extent);
5927                 } else {
5928                         btrfs_set_extent_refs(leaf, ei, refs);
5929                         btrfs_mark_buffer_dirty(leaf);
5930                 }
5931                 if (found_extent) {
5932                         ret = remove_extent_backref(trans, extent_root, path,
5933                                                     iref, refs_to_drop,
5934                                                     is_data);
5935                         if (ret) {
5936                                 btrfs_abort_transaction(trans, extent_root, ret);
5937                                 goto out;
5938                         }
5939                 }
5940                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
5941                                  root_objectid);
5942         } else {
5943                 if (found_extent) {
5944                         BUG_ON(is_data && refs_to_drop !=
5945                                extent_data_ref_count(root, path, iref));
5946                         if (iref) {
5947                                 BUG_ON(path->slots[0] != extent_slot);
5948                         } else {
5949                                 BUG_ON(path->slots[0] != extent_slot + 1);
5950                                 path->slots[0] = extent_slot;
5951                                 num_to_del = 2;
5952                         }
5953                 }
5954
5955                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5956                                       num_to_del);
5957                 if (ret) {
5958                         btrfs_abort_transaction(trans, extent_root, ret);
5959                         goto out;
5960                 }
5961                 btrfs_release_path(path);
5962
5963                 if (is_data) {
5964                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5965                         if (ret) {
5966                                 btrfs_abort_transaction(trans, extent_root, ret);
5967                                 goto out;
5968                         }
5969                 }
5970
5971                 ret = update_block_group(root, bytenr, num_bytes, 0);
5972                 if (ret) {
5973                         btrfs_abort_transaction(trans, extent_root, ret);
5974                         goto out;
5975                 }
5976         }
5977 out:
5978         btrfs_free_path(path);
5979         return ret;
5980 }
5981
5982 /*
5983  * when we free a block, it is possible (and likely) that we free the last
5984  * delayed ref for that extent as well.  This searches the delayed ref tree for
5985  * a given extent, and if there are no other delayed refs to be processed, it
5986  * removes it from the tree.
5987  */
5988 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5989                                       struct btrfs_root *root, u64 bytenr)
5990 {
5991         struct btrfs_delayed_ref_head *head;
5992         struct btrfs_delayed_ref_root *delayed_refs;
5993         int ret = 0;
5994
5995         delayed_refs = &trans->transaction->delayed_refs;
5996         spin_lock(&delayed_refs->lock);
5997         head = btrfs_find_delayed_ref_head(trans, bytenr);
5998         if (!head)
5999                 goto out_delayed_unlock;
6000
6001         spin_lock(&head->lock);
6002         if (rb_first(&head->ref_root))
6003                 goto out;
6004
6005         if (head->extent_op) {
6006                 if (!head->must_insert_reserved)
6007                         goto out;
6008                 btrfs_free_delayed_extent_op(head->extent_op);
6009                 head->extent_op = NULL;
6010         }
6011
6012         /*
6013          * waiting for the lock here would deadlock.  If someone else has it
6014          * locked, they are already in the process of dropping it anyway
6015          */
6016         if (!mutex_trylock(&head->mutex))
6017                 goto out;
6018
6019         /*
6020          * at this point we have a head with no other entries.  Go
6021          * ahead and process it.
6022          */
6023         head->node.in_tree = 0;
6024         rb_erase(&head->href_node, &delayed_refs->href_root);
6025
6026         atomic_dec(&delayed_refs->num_entries);
6027
6028         /*
6029          * we don't take a ref on the node because we're removing it from the
6030          * tree, so we just steal the ref the tree was holding.
6031          */
6032         delayed_refs->num_heads--;
6033         if (head->processing == 0)
6034                 delayed_refs->num_heads_ready--;
6035         head->processing = 0;
6036         spin_unlock(&head->lock);
6037         spin_unlock(&delayed_refs->lock);
6038
6039         BUG_ON(head->extent_op);
6040         if (head->must_insert_reserved)
6041                 ret = 1;
6042
6043         mutex_unlock(&head->mutex);
6044         btrfs_put_delayed_ref(&head->node);
6045         return ret;
6046 out:
6047         spin_unlock(&head->lock);
6048
6049 out_delayed_unlock:
6050         spin_unlock(&delayed_refs->lock);
6051         return 0;
6052 }
6053
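/*
 * Free a tree block.  A delayed ref drop is queued for all but log tree
 * blocks.  If this was the last reference and the block was allocated in
 * the running transaction without ever being written, it can go straight
 * back to the free space cache; otherwise it is pinned until the
 * transaction commits.
 */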
6054 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
6055                            struct btrfs_root *root,
6056                            struct extent_buffer *buf,
6057                            u64 parent, int last_ref)
6058 {
6059         struct btrfs_block_group_cache *cache = NULL;
6060         int pin = 1;
6061         int ret;
6062
6063         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6064                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6065                                         buf->start, buf->len,
6066                                         parent, root->root_key.objectid,
6067                                         btrfs_header_level(buf),
6068                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
6069                 BUG_ON(ret); /* -ENOMEM */
6070         }
6071
6072         if (!last_ref)
6073                 return;
6074
6075         cache = btrfs_lookup_block_group(root->fs_info, buf->start);
6076
6077         if (btrfs_header_generation(buf) == trans->transid) {
6078                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6079                         ret = check_ref_cleanup(trans, root, buf->start);
6080                         if (!ret)
6081                                 goto out;
6082                 }
6083
6084                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
6085                         pin_down_extent(root, cache, buf->start, buf->len, 1);
6086                         goto out;
6087                 }
6088
6089                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
6090
6091                 btrfs_add_free_space(cache, buf->start, buf->len);
6092                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
6093                 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
6094                 pin = 0;
6095         }
6096 out:
6097         if (pin)
6098                 add_pinned_bytes(root->fs_info, buf->len,
6099                                  btrfs_header_level(buf),
6100                                  root->root_key.objectid);
6101
6102         /*
6103          * We are deleting the buffer; clear the corrupt flag since it no
6104          * longer matters.
6105          */
6106         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6107         btrfs_put_block_group(cache);
6108 }
6109
6110 /* Can return -ENOMEM */
6111 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6112                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6113                       u64 owner, u64 offset, int for_cow)
6114 {
6115         int ret;
6116         struct btrfs_fs_info *fs_info = root->fs_info;
6117
6118         add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6119
6120         /*
6121          * tree log blocks never actually go into the extent allocation
6122          * tree log blocks never actually go into the extent allocation
6123          * tree; just update pinning info and exit early.
6124         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6125                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6126                 /* unlocks the pinned mutex */
6127                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6128                 ret = 0;
6129         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6130                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6131                                         num_bytes,
6132                                         parent, root_objectid, (int)owner,
6133                                         BTRFS_DROP_DELAYED_REF, NULL, for_cow);
6134         } else {
6135                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6136                                                 num_bytes,
6137                                                 parent, root_objectid, owner,
6138                                                 offset, BTRFS_DROP_DELAYED_REF,
6139                                                 NULL, for_cow);
6140         }
6141         return ret;
6142 }
6143
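/* round an allocation up to the stripe alignment of the filesystem */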
6144 static u64 stripe_align(struct btrfs_root *root,
6145                         struct btrfs_block_group_cache *cache,
6146                         u64 val, u64 num_bytes)
6147 {
6148         u64 ret = ALIGN(val, root->stripesize);
6149         return ret;
6150 }
6151
6152 /*
6153  * when we wait for progress in the block group caching, it's because
6154  * our allocation attempt failed at least once.  So, we must sleep
6155  * and let some progress happen before we try again.
6156  *
6157  * This function will sleep at least once waiting for new free space to
6158  * show up, and then it will check the block group free space numbers
6159  * for our min num_bytes.  Another option is to have it go ahead
6160  * and look in the rbtree for a free extent of a given size, but this
6161  * is a good start.
6162  *
6163  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6164  * any of the information in this block group.
6165  */
6166 static noinline void
6167 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6168                                 u64 num_bytes)
6169 {
6170         struct btrfs_caching_control *caching_ctl;
6171
6172         caching_ctl = get_caching_control(cache);
6173         if (!caching_ctl)
6174                 return;
6175
6176         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6177                    (cache->free_space_ctl->free_space >= num_bytes));
6178
6179         put_caching_control(caching_ctl);
6180 }
6181
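/*
 * Block until the caching thread for this block group has finished,
 * returning -EIO if the caching ended in error.
 */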
6182 static noinline int
6183 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6184 {
6185         struct btrfs_caching_control *caching_ctl;
6186         int ret = 0;
6187
6188         caching_ctl = get_caching_control(cache);
6189         if (!caching_ctl)
6190                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6191
6192         wait_event(caching_ctl->wait, block_group_cache_done(cache));
6193         if (cache->cached == BTRFS_CACHE_ERROR)
6194                 ret = -EIO;
6195         put_caching_control(caching_ctl);
6196         return ret;
6197 }
6198
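/*
 * Map block group flags to the btrfs_raid_types index used for
 * space_info->block_groups and the raid name table below.
 */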
6199 int __get_raid_index(u64 flags)
6200 {
6201         if (flags & BTRFS_BLOCK_GROUP_RAID10)
6202                 return BTRFS_RAID_RAID10;
6203         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6204                 return BTRFS_RAID_RAID1;
6205         else if (flags & BTRFS_BLOCK_GROUP_DUP)
6206                 return BTRFS_RAID_DUP;
6207         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6208                 return BTRFS_RAID_RAID0;
6209         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6210                 return BTRFS_RAID_RAID5;
6211         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6212                 return BTRFS_RAID_RAID6;
6213
6214         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6215 }
6216
6217 int get_block_group_index(struct btrfs_block_group_cache *cache)
6218 {
6219         return __get_raid_index(cache->flags);
6220 }
6221
6222 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6223         [BTRFS_RAID_RAID10]     = "raid10",
6224         [BTRFS_RAID_RAID1]      = "raid1",
6225         [BTRFS_RAID_DUP]        = "dup",
6226         [BTRFS_RAID_RAID0]      = "raid0",
6227         [BTRFS_RAID_SINGLE]     = "single",
6228         [BTRFS_RAID_RAID5]      = "raid5",
6229         [BTRFS_RAID_RAID6]      = "raid6",
6230 };
6231
6232 static const char *get_raid_name(enum btrfs_raid_types type)
6233 {
6234         if (type >= BTRFS_NR_RAID_TYPES)
6235                 return NULL;
6236
6237         return btrfs_raid_type_names[type];
6238 }
6239
6240 enum btrfs_loop_type {
6241         LOOP_CACHING_NOWAIT = 0,
6242         LOOP_CACHING_WAIT = 1,
6243         LOOP_ALLOC_CHUNK = 2,
6244         LOOP_NO_EMPTY_SIZE = 3,
6245 };
6246
6247 /*
6248  * walks the btree of allocated extents and finds a hole of a given size.
6249  * The key ins is changed to record the hole:
6250  * ins->objectid == start position
6251  * ins->type == BTRFS_EXTENT_ITEM_KEY
6252  * ins->offset == the size of the hole.
6253  * Any available blocks before search_start are skipped.
6254  *
6255  * If there is no suitable free space, we will record the max size of
6256  * the free space extent we saw during the search.
6257  */
6258 static noinline int find_free_extent(struct btrfs_root *orig_root,
6259                                      u64 num_bytes, u64 empty_size,
6260                                      u64 hint_byte, struct btrfs_key *ins,
6261                                      u64 flags)
6262 {
6263         int ret = 0;
6264         struct btrfs_root *root = orig_root->fs_info->extent_root;
6265         struct btrfs_free_cluster *last_ptr = NULL;
6266         struct btrfs_block_group_cache *block_group = NULL;
6267         u64 search_start = 0;
6268         u64 max_extent_size = 0;
6269         int empty_cluster = 2 * 1024 * 1024;
6270         struct btrfs_space_info *space_info;
6271         int loop = 0;
6272         int index = __get_raid_index(flags);
6273         int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6274                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
6275         bool failed_cluster_refill = false;
6276         bool failed_alloc = false;
6277         bool use_cluster = true;
6278         bool have_caching_bg = false;
6279
6280         WARN_ON(num_bytes < root->sectorsize);
6281         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
6282         ins->objectid = 0;
6283         ins->offset = 0;
6284
6285         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
6286
6287         space_info = __find_space_info(root->fs_info, flags);
6288         if (!space_info) {
6289                 btrfs_err(root->fs_info, "No space info for %llu", flags);
6290                 return -ENOSPC;
6291         }
6292
6293         /*
6294          * If the space info is for both data and metadata it means we have a
6295          * small filesystem and we can't use the clustering stuff.
6296          */
6297         if (btrfs_mixed_space_info(space_info))
6298                 use_cluster = false;
6299
6300         if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6301                 last_ptr = &root->fs_info->meta_alloc_cluster;
6302                 if (!btrfs_test_opt(root, SSD))
6303                         empty_cluster = 64 * 1024;
6304         }
6305
6306         if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6307             btrfs_test_opt(root, SSD)) {
6308                 last_ptr = &root->fs_info->data_alloc_cluster;
6309         }
6310
6311         if (last_ptr) {
6312                 spin_lock(&last_ptr->lock);
6313                 if (last_ptr->block_group)
6314                         hint_byte = last_ptr->window_start;
6315                 spin_unlock(&last_ptr->lock);
6316         }
6317
6318         search_start = max(search_start, first_logical_byte(root, 0));
6319         search_start = max(search_start, hint_byte);
6320
6321         if (!last_ptr)
6322                 empty_cluster = 0;
6323
6324         if (search_start == hint_byte) {
6325                 block_group = btrfs_lookup_block_group(root->fs_info,
6326                                                        search_start);
6327                 /*
6328                  * we don't want to use the block group if it doesn't match our
6329                  * allocation bits, or if it's not cached.
6330                  *
6331                  * However if we are re-searching with an ideal block group
6332                  * picked out then we don't care that the block group is cached.
6333                  */
6334                 if (block_group && block_group_bits(block_group, flags) &&
6335                     block_group->cached != BTRFS_CACHE_NO) {
6336                         down_read(&space_info->groups_sem);
6337                         if (list_empty(&block_group->list) ||
6338                             block_group->ro) {
6339                                 /*
6340                                  * someone is removing this block group;
6341                                  * we can't jump to the have_block_group
6342                                  * label because our list pointers are
6343                                  * not valid
6344                                  */
6345                                 btrfs_put_block_group(block_group);
6346                                 up_read(&space_info->groups_sem);
6347                         } else {
6348                                 index = get_block_group_index(block_group);
6349                                 goto have_block_group;
6350                         }
6351                 } else if (block_group) {
6352                         btrfs_put_block_group(block_group);
6353                 }
6354         }
6355 search:
6356         have_caching_bg = false;
6357         down_read(&space_info->groups_sem);
6358         list_for_each_entry(block_group, &space_info->block_groups[index],
6359                             list) {
6360                 u64 offset;
6361                 int cached;
6362
6363                 btrfs_get_block_group(block_group);
6364                 search_start = block_group->key.objectid;
6365
6366                 /*
6367                  * this can happen if we end up cycling through all the
6368                  * raid types, but we want to make sure we only allocate
6369                  * for the proper type.
6370                  */
6371                 if (!block_group_bits(block_group, flags)) {
6372                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
6373                                     BTRFS_BLOCK_GROUP_RAID1 |
6374                                     BTRFS_BLOCK_GROUP_RAID5 |
6375                                     BTRFS_BLOCK_GROUP_RAID6 |
6376                                     BTRFS_BLOCK_GROUP_RAID10;
6377
6378                         /*
6379                          * if they asked for extra copies and this block group
6380                          * doesn't provide them, bail.  This does allow us to
6381                          * fill raid0 from raid1.
6382                          */
6383                         if ((flags & extra) && !(block_group->flags & extra))
6384                                 goto loop;
6385                 }
6386
6387 have_block_group:
6388                 cached = block_group_cache_done(block_group);
6389                 if (unlikely(!cached)) {
6390                         ret = cache_block_group(block_group, 0);
6391                         BUG_ON(ret < 0);
6392                         ret = 0;
6393                 }
6394
6395                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
6396                         goto loop;
6397                 if (unlikely(block_group->ro))
6398                         goto loop;
6399
6400                 /*
6401                  * OK, we want to try and use the cluster allocator, so
6402                  * let's look there
6403                  */
6404                 if (last_ptr) {
6405                         struct btrfs_block_group_cache *used_block_group;
6406                         unsigned long aligned_cluster;
6407                         /*
6408                          * the refill lock keeps out other
6409                          * people trying to start a new cluster
6410                          */
6411                         spin_lock(&last_ptr->refill_lock);
6412                         used_block_group = last_ptr->block_group;
6413                         if (used_block_group != block_group &&
6414                             (!used_block_group ||
6415                              used_block_group->ro ||
6416                              !block_group_bits(used_block_group, flags)))
6417                                 goto refill_cluster;
6418
6419                         if (used_block_group != block_group)
6420                                 btrfs_get_block_group(used_block_group);
6421
6422                         offset = btrfs_alloc_from_cluster(used_block_group,
6423                                                 last_ptr,
6424                                                 num_bytes,
6425                                                 used_block_group->key.objectid,
6426                                                 &max_extent_size);
6427                         if (offset) {
6428                                 /* we have a block, we're done */
6429                                 spin_unlock(&last_ptr->refill_lock);
6430                                 trace_btrfs_reserve_extent_cluster(root,
6431                                                 used_block_group,
6432                                                 search_start, num_bytes);
6433                                 if (used_block_group != block_group) {
6434                                         btrfs_put_block_group(block_group);
6435                                         block_group = used_block_group;
6436                                 }
6437                                 goto checks;
6438                         }
6439
6440                         WARN_ON(last_ptr->block_group != used_block_group);
6441                         if (used_block_group != block_group)
6442                                 btrfs_put_block_group(used_block_group);
6443 refill_cluster:
6444                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
6445                          * set up a new cluster, so let's just skip it
6446                          * and let the allocator find whatever block
6447                          * it can find.  If we reach this point, we
6448                          * will have tried the cluster allocator
6449                          * plenty of times and not have found
6450                          * anything, so we are likely way too
6451                          * fragmented for the clustering stuff to find
6452                          * anything.
6453                          *
6454                          * However, if the cluster is taken from the
6455                          * current block group, release the cluster
6456                          * first, so that we stand a better chance of
6457                          * succeeding in the unclustered
6458                          * allocation.  */
6459                         if (loop >= LOOP_NO_EMPTY_SIZE &&
6460                             last_ptr->block_group != block_group) {
6461                                 spin_unlock(&last_ptr->refill_lock);
6462                                 goto unclustered_alloc;
6463                         }
6464
6465                         /*
6466                          * this cluster didn't work out, free it and
6467                          * start over
6468                          */
6469                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6470
6471                         if (loop >= LOOP_NO_EMPTY_SIZE) {
6472                                 spin_unlock(&last_ptr->refill_lock);
6473                                 goto unclustered_alloc;
6474                         }
6475
6476                         aligned_cluster = max_t(unsigned long,
6477                                                 empty_cluster + empty_size,
6478                                               block_group->full_stripe_len);
6479
6480                         /* allocate a cluster in this block group */
6481                         ret = btrfs_find_space_cluster(root, block_group,
6482                                                        last_ptr, search_start,
6483                                                        num_bytes,
6484                                                        aligned_cluster);
6485                         if (ret == 0) {
6486                                 /*
6487                                  * now pull our allocation out of this
6488                                  * cluster
6489                                  */
6490                                 offset = btrfs_alloc_from_cluster(block_group,
6491                                                         last_ptr,
6492                                                         num_bytes,
6493                                                         search_start,
6494                                                         &max_extent_size);
6495                                 if (offset) {
6496                                         /* we found one, proceed */
6497                                         spin_unlock(&last_ptr->refill_lock);
6498                                         trace_btrfs_reserve_extent_cluster(root,
6499                                                 block_group, search_start,
6500                                                 num_bytes);
6501                                         goto checks;
6502                                 }
6503                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
6504                                    && !failed_cluster_refill) {
6505                                 spin_unlock(&last_ptr->refill_lock);
6506
6507                                 failed_cluster_refill = true;
6508                                 wait_block_group_cache_progress(block_group,
6509                                        num_bytes + empty_cluster + empty_size);
6510                                 goto have_block_group;
6511                         }
6512
6513                         /*
6514                          * at this point we either didn't find a cluster
6515                          * or we weren't able to allocate a block from our
6516                          * cluster.  Free the cluster we've been trying
6517                          * to use, and go to the next block group
6518                          */
6519                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6520                         spin_unlock(&last_ptr->refill_lock);
6521                         goto loop;
6522                 }
6523
6524 unclustered_alloc:
6525                 spin_lock(&block_group->free_space_ctl->tree_lock);
6526                 if (cached &&
6527                     block_group->free_space_ctl->free_space <
6528                     num_bytes + empty_cluster + empty_size) {
6529                         if (block_group->free_space_ctl->free_space >
6530                             max_extent_size)
6531                                 max_extent_size =
6532                                         block_group->free_space_ctl->free_space;
6533                         spin_unlock(&block_group->free_space_ctl->tree_lock);
6534                         goto loop;
6535                 }
6536                 spin_unlock(&block_group->free_space_ctl->tree_lock);
6537
6538                 offset = btrfs_find_space_for_alloc(block_group, search_start,
6539                                                     num_bytes, empty_size,
6540                                                     &max_extent_size);
6541                 /*
6542                  * If we didn't find a chunk, and we haven't failed on this
6543                  * block group before, and this block group is in the middle of
6544                  * caching and we are ok with waiting, then go ahead and wait
6545                  * for progress to be made, and set failed_alloc to true.
6546                  *
6547                  * If failed_alloc is true then we've already waited on this
6548                  * block group once and should move on to the next block group.
6549                  */
6550                 if (!offset && !failed_alloc && !cached &&
6551                     loop > LOOP_CACHING_NOWAIT) {
6552                         wait_block_group_cache_progress(block_group,
6553                                                 num_bytes + empty_size);
6554                         failed_alloc = true;
6555                         goto have_block_group;
6556                 } else if (!offset) {
6557                         if (!cached)
6558                                 have_caching_bg = true;
6559                         goto loop;
6560                 }
6561 checks:
6562                 search_start = stripe_align(root, block_group,
6563                                             offset, num_bytes);
6564
6565                 /* move on to the next group */
6566                 if (search_start + num_bytes >
6567                     block_group->key.objectid + block_group->key.offset) {
6568                         btrfs_add_free_space(block_group, offset, num_bytes);
6569                         goto loop;
6570                 }
6571
6572                 if (offset < search_start)
6573                         btrfs_add_free_space(block_group, offset,
6574                                              search_start - offset);
6575                 BUG_ON(offset > search_start);
6576
6577                 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
6578                                                   alloc_type);
6579                 if (ret == -EAGAIN) {
6580                         btrfs_add_free_space(block_group, offset, num_bytes);
6581                         goto loop;
6582                 }
6583
6584                 /* we are all good, let's return */
6585                 ins->objectid = search_start;
6586                 ins->offset = num_bytes;
6587
6588                 trace_btrfs_reserve_extent(orig_root, block_group,
6589                                            search_start, num_bytes);
6590                 btrfs_put_block_group(block_group);
6591                 break;
6592 loop:
6593                 failed_cluster_refill = false;
6594                 failed_alloc = false;
6595                 BUG_ON(index != get_block_group_index(block_group));
6596                 btrfs_put_block_group(block_group);
6597         }
6598         up_read(&space_info->groups_sem);
6599
6600         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
6601                 goto search;
6602
6603         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
6604                 goto search;
6605
6606         /*
6607          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
6608          *                      caching kthreads as we move along
6609          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
6610          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
6611          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
6612          *                      again
6613          */
6614         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
6615                 index = 0;
6616                 loop++;
6617                 if (loop == LOOP_ALLOC_CHUNK) {
6618                         struct btrfs_trans_handle *trans;
6619                         int exist = 0;
6620
6621                         trans = current->journal_info;
6622                         if (trans)
6623                                 exist = 1;
6624                         else
6625                                 trans = btrfs_join_transaction(root);
6626
6627                         if (IS_ERR(trans)) {
6628                                 ret = PTR_ERR(trans);
6629                                 goto out;
6630                         }
6631
6632                         ret = do_chunk_alloc(trans, root, flags,
6633                                              CHUNK_ALLOC_FORCE);
6634                         /*
6635                          * Do not bail out on ENOSPC since we
6636                          * can still try the remaining loop stages.
6637                          */
6638                         if (ret < 0 && ret != -ENOSPC)
6639                                 btrfs_abort_transaction(trans,
6640                                                         root, ret);
6641                         else
6642                                 ret = 0;
6643                         if (!exist)
6644                                 btrfs_end_transaction(trans, root);
6645                         if (ret)
6646                                 goto out;
6647                 }
6648
6649                 if (loop == LOOP_NO_EMPTY_SIZE) {
6650                         empty_size = 0;
6651                         empty_cluster = 0;
6652                 }
6653
6654                 goto search;
6655         } else if (!ins->objectid) {
6656                 ret = -ENOSPC;
6657         } else {
6658                 ret = 0;
6659         }
6660 out:
6661         if (ret == -ENOSPC)
6662                 ins->offset = max_extent_size;
6663         return ret;
6664 }
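
/*
 * Editor's sketch (not in the original source) of how the allocator
 * loop above escalates when the search comes up empty:
 *
 *     loop == LOOP_CACHING_NOWAIT  -> only fully cached block groups
 *     loop == LOOP_CACHING_WAIT    -> wait on the caching kthreads
 *     loop == LOOP_ALLOC_CHUNK     -> force-allocate a new chunk
 *     loop == LOOP_NO_EMPTY_SIZE   -> retry with empty_size and
 *                                     empty_cluster forced to 0
 *
 * Note how the LOOP_ALLOC_CHUNK step reuses current->journal_info when
 * a transaction handle is already running instead of calling
 * btrfs_join_transaction() a second time, and only calls
 * btrfs_end_transaction() on handles it created itself.
 */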
6665
6666 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
6667                             int dump_block_groups)
6668 {
6669         struct btrfs_block_group_cache *cache;
6670         int index = 0;
6671
6672         spin_lock(&info->lock);
6673         printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
6674                info->flags,
6675                info->total_bytes - info->bytes_used - info->bytes_pinned -
6676                info->bytes_reserved - info->bytes_readonly,
6677                (info->full) ? "" : "not ");
6678         printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
6679                "reserved=%llu, may_use=%llu, readonly=%llu\n",
6680                info->total_bytes, info->bytes_used, info->bytes_pinned,
6681                info->bytes_reserved, info->bytes_may_use,
6682                info->bytes_readonly);
6683         spin_unlock(&info->lock);
6684
6685         if (!dump_block_groups)
6686                 return;
6687
6688         down_read(&info->groups_sem);
6689 again:
6690         list_for_each_entry(cache, &info->block_groups[index], list) {
6691                 spin_lock(&cache->lock);
6692                 printk(KERN_INFO "BTRFS: "
6693                            "block group %llu has %llu bytes, "
6694                            "%llu used %llu pinned %llu reserved %s\n",
6695                        cache->key.objectid, cache->key.offset,
6696                        btrfs_block_group_used(&cache->item), cache->pinned,
6697                        cache->reserved, cache->ro ? "[readonly]" : "");
6698                 btrfs_dump_free_space(cache, bytes);
6699                 spin_unlock(&cache->lock);
6700         }
6701         if (++index < BTRFS_NR_RAID_TYPES)
6702                 goto again;
6703         up_read(&info->groups_sem);
6704 }
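
/*
 * Illustrative dump_space_info() output (editor's example; the values
 * are made up and the lines are wrapped for width, but the format
 * strings are the ones used above):
 *
 *   BTRFS: space_info 4 has 1073741824 free, is not full
 *   BTRFS: space_info total=2147483648, used=1073741824, pinned=0,
 *          reserved=0, may_use=0, readonly=0
 *   BTRFS: block group 12582912 has 8388608 bytes, 4194304 used
 *          0 pinned 0 reserved
 */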
6705
6706 int btrfs_reserve_extent(struct btrfs_root *root,
6707                          u64 num_bytes, u64 min_alloc_size,
6708                          u64 empty_size, u64 hint_byte,
6709                          struct btrfs_key *ins, int is_data)
6710 {
6711         bool final_tried = false;
6712         u64 flags;
6713         int ret;
6714
6715         flags = btrfs_get_alloc_profile(root, is_data);
6716 again:
6717         WARN_ON(num_bytes < root->sectorsize);
6718         ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
6719                                flags);
6720
6721         if (ret == -ENOSPC) {
6722                 if (!final_tried && ins->offset) {
6723                         num_bytes = min(num_bytes >> 1, ins->offset);
6724                         num_bytes = round_down(num_bytes, root->sectorsize);
6725                         num_bytes = max(num_bytes, min_alloc_size);
6726                         if (num_bytes == min_alloc_size)
6727                                 final_tried = true;
6728                         goto again;
6729                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6730                         struct btrfs_space_info *sinfo;
6731
6732                         sinfo = __find_space_info(root->fs_info, flags);
6733                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
6734                                 flags, num_bytes);
6735                         if (sinfo)
6736                                 dump_space_info(sinfo, num_bytes, 1);
6737                 }
6738         }
6739
6740         return ret;
6741 }
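
/*
 * Editor's worked example (not in the original source) of the -ENOSPC
 * retry above, assuming sectorsize == 4096:
 *
 *   btrfs_reserve_extent(root, 1M, 64K, ...) fails with ins->offset == 200K
 *     num_bytes = min(1M >> 1, 200K)     -> 200K
 *     num_bytes = round_down(200K, 4096) -> 200K (already aligned)
 *     num_bytes = max(200K, 64K)         -> 200K, retry
 *
 * The request keeps shrinking toward min_alloc_size; once num_bytes
 * hits min_alloc_size, final_tried is set and the next failure returns
 * -ENOSPC to the caller.
 */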
6742
6743 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6744                                         u64 start, u64 len, int pin)
6745 {
6746         struct btrfs_block_group_cache *cache;
6747         int ret = 0;
6748
6749         cache = btrfs_lookup_block_group(root->fs_info, start);
6750         if (!cache) {
6751                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
6752                         start);
6753                 return -ENOSPC;
6754         }
6755
6756         if (btrfs_test_opt(root, DISCARD))
6757                 ret = btrfs_discard_extent(root, start, len, NULL);
6758
6759         if (pin)
6760                 pin_down_extent(root, cache, start, len, 1);
6761         else {
6762                 btrfs_add_free_space(cache, start, len);
6763                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
6764         }
6765         btrfs_put_block_group(cache);
6766
6767         trace_btrfs_reserved_extent_free(root, start, len);
6768
6769         return ret;
6770 }
6771
6772 int btrfs_free_reserved_extent(struct btrfs_root *root,
6773                                         u64 start, u64 len)
6774 {
6775         return __btrfs_free_reserved_extent(root, start, len, 0);
6776 }
6777
6778 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6779                                        u64 start, u64 len)
6780 {
6781         return __btrfs_free_reserved_extent(root, start, len, 1);
6782 }
6783
6784 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6785                                       struct btrfs_root *root,
6786                                       u64 parent, u64 root_objectid,
6787                                       u64 flags, u64 owner, u64 offset,
6788                                       struct btrfs_key *ins, int ref_mod)
6789 {
6790         int ret;
6791         struct btrfs_fs_info *fs_info = root->fs_info;
6792         struct btrfs_extent_item *extent_item;
6793         struct btrfs_extent_inline_ref *iref;
6794         struct btrfs_path *path;
6795         struct extent_buffer *leaf;
6796         int type;
6797         u32 size;
6798
6799         if (parent > 0)
6800                 type = BTRFS_SHARED_DATA_REF_KEY;
6801         else
6802                 type = BTRFS_EXTENT_DATA_REF_KEY;
6803
6804         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
6805
6806         path = btrfs_alloc_path();
6807         if (!path)
6808                 return -ENOMEM;
6809
6810         path->leave_spinning = 1;
6811         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6812                                       ins, size);
6813         if (ret) {
6814                 btrfs_free_path(path);
6815                 return ret;
6816         }
6817
6818         leaf = path->nodes[0];
6819         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6820                                      struct btrfs_extent_item);
6821         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
6822         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6823         btrfs_set_extent_flags(leaf, extent_item,
6824                                flags | BTRFS_EXTENT_FLAG_DATA);
6825
6826         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6827         btrfs_set_extent_inline_ref_type(leaf, iref, type);
6828         if (parent > 0) {
6829                 struct btrfs_shared_data_ref *ref;
6830                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
6831                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6832                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
6833         } else {
6834                 struct btrfs_extent_data_ref *ref;
6835                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
6836                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
6837                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
6838                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
6839                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
6840         }
6841
6842         btrfs_mark_buffer_dirty(path->nodes[0]);
6843         btrfs_free_path(path);
6844
6845         ret = update_block_group(root, ins->objectid, ins->offset, 1);
6846         if (ret) { /* -ENOENT, logic error */
6847                 btrfs_err(fs_info, "update block group failed for %llu %llu",
6848                         ins->objectid, ins->offset);
6849                 BUG();
6850         }
6851         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
6852         return ret;
6853 }
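
/*
 * Editor's sketch (not in the original source) of the item written by
 * alloc_reserved_file_extent() for the non-shared (parent == 0) case:
 *
 *   key: (ins->objectid, BTRFS_EXTENT_ITEM_KEY, ins->offset)
 *   item data:
 *     struct btrfs_extent_item       refs, generation, flags | FLAG_DATA
 *     struct btrfs_extent_inline_ref type = BTRFS_EXTENT_DATA_REF_KEY
 *     struct btrfs_extent_data_ref   root, objectid, offset, count
 *
 * With parent > 0 the inline ref becomes a shared data ref keyed on
 * the parent block's bytenr instead.
 */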
6854
6855 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
6856                                      struct btrfs_root *root,
6857                                      u64 parent, u64 root_objectid,
6858                                      u64 flags, struct btrfs_disk_key *key,
6859                                      int level, struct btrfs_key *ins)
6860 {
6861         int ret;
6862         struct btrfs_fs_info *fs_info = root->fs_info;
6863         struct btrfs_extent_item *extent_item;
6864         struct btrfs_tree_block_info *block_info;
6865         struct btrfs_extent_inline_ref *iref;
6866         struct btrfs_path *path;
6867         struct extent_buffer *leaf;
6868         u32 size = sizeof(*extent_item) + sizeof(*iref);
6869         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6870                                                  SKINNY_METADATA);
6871
6872         if (!skinny_metadata)
6873                 size += sizeof(*block_info);
6874
6875         path = btrfs_alloc_path();
6876         if (!path) {
6877                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
6878                                                    root->leafsize);
6879                 return -ENOMEM;
6880         }
6881
6882         path->leave_spinning = 1;
6883         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6884                                       ins, size);
6885         if (ret) {
6886                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
6887                                                    root->leafsize);
6888                 btrfs_free_path(path);
6889                 return ret;
6890         }
6891
6892         leaf = path->nodes[0];
6893         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6894                                      struct btrfs_extent_item);
6895         btrfs_set_extent_refs(leaf, extent_item, 1);
6896         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6897         btrfs_set_extent_flags(leaf, extent_item,
6898                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
6899
6900         if (skinny_metadata) {
6901                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6902         } else {
6903                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
6904                 btrfs_set_tree_block_key(leaf, block_info, key);
6905                 btrfs_set_tree_block_level(leaf, block_info, level);
6906                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
6907         }
6908
6909         if (parent > 0) {
6910                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
6911                 btrfs_set_extent_inline_ref_type(leaf, iref,
6912                                                  BTRFS_SHARED_BLOCK_REF_KEY);
6913                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6914         } else {
6915                 btrfs_set_extent_inline_ref_type(leaf, iref,
6916                                                  BTRFS_TREE_BLOCK_REF_KEY);
6917                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
6918         }
6919
6920         btrfs_mark_buffer_dirty(leaf);
6921         btrfs_free_path(path);
6922
6923         ret = update_block_group(root, ins->objectid, root->leafsize, 1);
6924         if (ret) { /* -ENOENT, logic error */
6925                 btrfs_err(fs_info, "update block group failed for %llu %llu",
6926                         ins->objectid, (u64)root->leafsize);
6927                 BUG();
6928         }
6929
6930         trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->leafsize);
6931         return ret;
6932 }
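
/*
 * Editor's note (not in the original source) contrasting the two
 * on-disk layouts produced by alloc_reserved_tree_block() above; with
 * the SKINNY_METADATA incompat flag the key itself encodes the level,
 * so struct btrfs_tree_block_info can be omitted:
 *
 *   regular: (bytenr, BTRFS_EXTENT_ITEM_KEY, leafsize)
 *            extent_item + tree_block_info + inline ref
 *   skinny:  (bytenr, BTRFS_METADATA_ITEM_KEY, level)
 *            extent_item + inline ref
 */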
6933
6934 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6935                                      struct btrfs_root *root,
6936                                      u64 root_objectid, u64 owner,
6937                                      u64 offset, struct btrfs_key *ins)
6938 {
6939         int ret;
6940
6941         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
6942
6943         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
6944                                          ins->offset, 0,
6945                                          root_objectid, owner, offset,
6946                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
6947         return ret;
6948 }
6949
6950 /*
6951  * this is used by the tree logging recovery code.  It records that
6952  * an extent has been allocated and makes sure to clear the free
6953  * space cache bits as well
6954  */
6955 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
6956                                    struct btrfs_root *root,
6957                                    u64 root_objectid, u64 owner, u64 offset,
6958                                    struct btrfs_key *ins)
6959 {
6960         int ret;
6961         struct btrfs_block_group_cache *block_group;
6962
6963         /*
6964          * Mixed block groups will exclude before processing the log so we only
6965          * need to do the exclude dance if this fs isn't mixed.
6966          */
6967         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
6968                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
6969                 if (ret)
6970                         return ret;
6971         }
6972
6973         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
6974         if (!block_group)
6975                 return -EINVAL;
6976
6977         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
6978                                           RESERVE_ALLOC_NO_ACCOUNT);
6979         BUG_ON(ret); /* logic error */
6980         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6981                                          0, owner, offset, ins, 1);
6982         btrfs_put_block_group(block_group);
6983         return ret;
6984 }
6985
6986 static struct extent_buffer *
6987 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6988                       u64 bytenr, u32 blocksize, int level)
6989 {
6990         struct extent_buffer *buf;
6991
6992         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
6993         if (!buf)
6994                 return ERR_PTR(-ENOMEM);
6995         btrfs_set_header_generation(buf, trans->transid);
6996         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
6997         btrfs_tree_lock(buf);
6998         clean_tree_block(trans, root, buf);
6999         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
7000
7001         btrfs_set_lock_blocking(buf);
7002         btrfs_set_buffer_uptodate(buf);
7003
7004         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
7005                 /*
7006                  * we allow two log transactions at a time, use different
7007                  * EXTENT bits to differentiate dirty pages.
7008                  */
7009                 if (root->log_transid % 2 == 0)
7010                         set_extent_dirty(&root->dirty_log_pages, buf->start,
7011                                         buf->start + buf->len - 1, GFP_NOFS);
7012                 else
7013                         set_extent_new(&root->dirty_log_pages, buf->start,
7014                                         buf->start + buf->len - 1, GFP_NOFS);
7015         } else {
7016                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
7017                          buf->start + buf->len - 1, GFP_NOFS);
7018         }
7019         trans->blocks_used++;
7020         /* this returns a buffer locked for blocking */
7021         return buf;
7022 }
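
/*
 * Editor's note (not in the original source) on the log-tree branch
 * above: the parity of log_transid picks the extent state bit, e.g.
 *
 *   log_transid 4 (even) -> EXTENT_DIRTY in root->dirty_log_pages
 *   log_transid 5 (odd)  -> EXTENT_NEW   in root->dirty_log_pages
 *
 * so the two log transactions allowed in flight never mix their dirty
 * pages.
 */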
7023
7024 static struct btrfs_block_rsv *
7025 use_block_rsv(struct btrfs_trans_handle *trans,
7026               struct btrfs_root *root, u32 blocksize)
7027 {
7028         struct btrfs_block_rsv *block_rsv;
7029         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
7030         int ret;
7031         bool global_updated = false;
7032
7033         block_rsv = get_block_rsv(trans, root);
7034
7035         if (unlikely(block_rsv->size == 0))
7036                 goto try_reserve;
7037 again:
7038         ret = block_rsv_use_bytes(block_rsv, blocksize);
7039         if (!ret)
7040                 return block_rsv;
7041
7042         if (block_rsv->failfast)
7043                 return ERR_PTR(ret);
7044
7045         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
7046                 global_updated = true;
7047                 update_global_block_rsv(root->fs_info);
7048                 goto again;
7049         }
7050
7051         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7052                 static DEFINE_RATELIMIT_STATE(_rs,
7053                                 DEFAULT_RATELIMIT_INTERVAL * 10,
7054                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
7055                 if (__ratelimit(&_rs))
7056                         WARN(1, KERN_DEBUG
7057                                 "BTRFS: block rsv returned %d\n", ret);
7058         }
7059 try_reserve:
7060         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
7061                                      BTRFS_RESERVE_NO_FLUSH);
7062         if (!ret)
7063                 return block_rsv;
7064         /*
7065          * If we couldn't reserve metadata bytes, try to use some from
7066          * the global reserve, as long as this rsv shares the global
7067          * reserve's space_info.
7068          */
7069         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
7070             block_rsv->space_info == global_rsv->space_info) {
7071                 ret = block_rsv_use_bytes(global_rsv, blocksize);
7072                 if (!ret)
7073                         return global_rsv;
7074         }
7075         return ERR_PTR(ret);
7076 }
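
/*
 * Editor's summary (not in the original source) of the fallback order
 * in use_block_rsv() above:
 *
 *   1. take blocksize bytes from the root's block_rsv (an empty,
 *      size == 0 rsv skips straight to step 3)
 *   2. if that rsv is the global one, refresh it once and retry
 *   3. reserve fresh metadata bytes with BTRFS_RESERVE_NO_FLUSH
 *   4. as a last resort, steal from the global reserve when both
 *      rsvs share the same space_info
 *
 * A failfast rsv returns the error as soon as step 1 fails.
 */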
7077
7078 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
7079                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
7080 {
7081         block_rsv_add_bytes(block_rsv, blocksize, 0);
7082         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
7083 }
7084
7085 /*
7086  * finds a free extent and does all the dirty work required for allocation:
7087  * it reserves the extent, reads out the corresponding tree buffer and
7088  * initializes it as a fresh block owned by this transaction.
7089  *
7090  * returns the locked tree buffer on success, or an ERR_PTR on failure.
7091  */
7092 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
7093                                         struct btrfs_root *root, u32 blocksize,
7094                                         u64 parent, u64 root_objectid,
7095                                         struct btrfs_disk_key *key, int level,
7096                                         u64 hint, u64 empty_size)
7097 {
7098         struct btrfs_key ins;
7099         struct btrfs_block_rsv *block_rsv;
7100         struct extent_buffer *buf;
7101         u64 flags = 0;
7102         int ret;
7103         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7104                                                  SKINNY_METADATA);
7105
7106         block_rsv = use_block_rsv(trans, root, blocksize);
7107         if (IS_ERR(block_rsv))
7108                 return ERR_CAST(block_rsv);
7109
7110         ret = btrfs_reserve_extent(root, blocksize, blocksize,
7111                                    empty_size, hint, &ins, 0);
7112         if (ret) {
7113                 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
7114                 return ERR_PTR(ret);
7115         }
7116
7117         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
7118                                     blocksize, level);
7119         BUG_ON(IS_ERR(buf)); /* -ENOMEM */
7120
7121         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
7122                 if (parent == 0)
7123                         parent = ins.objectid;
7124                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7125         } else
7126                 BUG_ON(parent > 0);
7127
7128         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
7129                 struct btrfs_delayed_extent_op *extent_op;
7130                 extent_op = btrfs_alloc_delayed_extent_op();
7131                 BUG_ON(!extent_op); /* -ENOMEM */
7132                 if (key)
7133                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
7134                 else
7135                         memset(&extent_op->key, 0, sizeof(extent_op->key));
7136                 extent_op->flags_to_set = flags;
7137                 if (skinny_metadata)
7138                         extent_op->update_key = 0;
7139                 else
7140                         extent_op->update_key = 1;
7141                 extent_op->update_flags = 1;
7142                 extent_op->is_data = 0;
7143                 extent_op->level = level;
7144
7145                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7146                                         ins.objectid,
7147                                         ins.offset, parent, root_objectid,
7148                                         level, BTRFS_ADD_DELAYED_EXTENT,
7149                                         extent_op, 0);
7150                 BUG_ON(ret); /* -ENOMEM */
7151         }
7152         return buf;
7153 }
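
/*
 * Editor's usage note (not in the original source): the COW path is
 * the typical caller of btrfs_alloc_free_block(), along the lines of
 *
 *     cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
 *                                  root->root_key.objectid, &disk_key,
 *                                  level, search_start, empty_size);
 *
 * i.e. a new block of the same size, hinted near the block being
 * copied so the tree stays reasonably local on disk.
 */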
7154
7155 struct walk_control {
7156         u64 refs[BTRFS_MAX_LEVEL];
7157         u64 flags[BTRFS_MAX_LEVEL];
7158         struct btrfs_key update_progress;
7159         int stage;
7160         int level;
7161         int shared_level;
7162         int update_ref;
7163         int keep_locks;
7164         int reada_slot;
7165         int reada_count;
7166         int for_reloc;
7167 };
7168
7169 #define DROP_REFERENCE  1
7170 #define UPDATE_BACKREF  2
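
/*
 * Editor's illustration (not in the original source) of the two walk
 * stages above, as driven by walk_down_tree()/walk_up_tree() below:
 *
 *   DROP_REFERENCE: the normal deletion stage; blocks owned solely by
 *   this tree are freed, shared blocks just lose one reference.
 *
 *   UPDATE_BACKREF: entered from do_walk_down() when a shared subtree
 *   first needs its backrefs converted to full backrefs; once
 *   walk_up_proc() climbs back to wc->shared_level, the stage flips
 *   back to DROP_REFERENCE and the subtree is walked again to drop it.
 */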
7171
7172 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
7173                                      struct btrfs_root *root,
7174                                      struct walk_control *wc,
7175                                      struct btrfs_path *path)
7176 {
7177         u64 bytenr;
7178         u64 generation;
7179         u64 refs;
7180         u64 flags;
7181         u32 nritems;
7182         u32 blocksize;
7183         struct btrfs_key key;
7184         struct extent_buffer *eb;
7185         int ret;
7186         int slot;
7187         int nread = 0;
7188
7189         if (path->slots[wc->level] < wc->reada_slot) {
7190                 wc->reada_count = wc->reada_count * 2 / 3;
7191                 wc->reada_count = max(wc->reada_count, 2);
7192         } else {
7193                 wc->reada_count = wc->reada_count * 3 / 2;
7194                 wc->reada_count = min_t(int, wc->reada_count,
7195                                         BTRFS_NODEPTRS_PER_BLOCK(root));
7196         }
7197
7198         eb = path->nodes[wc->level];
7199         nritems = btrfs_header_nritems(eb);
7200         blocksize = btrfs_level_size(root, wc->level - 1);
7201
7202         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
7203                 if (nread >= wc->reada_count)
7204                         break;
7205
7206                 cond_resched();
7207                 bytenr = btrfs_node_blockptr(eb, slot);
7208                 generation = btrfs_node_ptr_generation(eb, slot);
7209
7210                 if (slot == path->slots[wc->level])
7211                         goto reada;
7212
7213                 if (wc->stage == UPDATE_BACKREF &&
7214                     generation <= root->root_key.offset)
7215                         continue;
7216
7217                 /* We don't lock the tree block, it's OK to be racy here */
7218                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
7219                                                wc->level - 1, 1, &refs,
7220                                                &flags);
7221                 /* We don't care about errors in readahead. */
7222                 if (ret < 0)
7223                         continue;
7224                 BUG_ON(refs == 0);
7225
7226                 if (wc->stage == DROP_REFERENCE) {
7227                         if (refs == 1)
7228                                 goto reada;
7229
7230                         if (wc->level == 1 &&
7231                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7232                                 continue;
7233                         if (!wc->update_ref ||
7234                             generation <= root->root_key.offset)
7235                                 continue;
7236                         btrfs_node_key_to_cpu(eb, &key, slot);
7237                         ret = btrfs_comp_cpu_keys(&key,
7238                                                   &wc->update_progress);
7239                         if (ret < 0)
7240                                 continue;
7241                 } else {
7242                         if (wc->level == 1 &&
7243                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7244                                 continue;
7245                 }
7246 reada:
7247                 ret = readahead_tree_block(root, bytenr, blocksize,
7248                                            generation);
7249                 if (ret)
7250                         break;
7251                 nread++;
7252         }
7253         wc->reada_slot = slot;
7254 }
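
/*
 * Editor's worked example (not in the original source) of the
 * readahead window adaptation at the top of reada_walk_down():
 *
 *   still behind the last window (slot < reada_slot): shrink by 2/3,
 *     e.g. reada_count 30 -> 20 -> 13 -> ... (never below 2)
 *   caught up with the window: grow by 3/2,
 *     e.g. reada_count 30 -> 45 -> 67 -> ... capped at
 *     BTRFS_NODEPTRS_PER_BLOCK(root)
 */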
7255
7256 /*
7257  * helper to process tree block while walking down the tree.
7258  *
7259  * when wc->stage == UPDATE_BACKREF, this function updates
7260  * back refs for pointers in the block.
7261  *
7262  * NOTE: return value 1 means we should stop walking down.
7263  */
7264 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
7265                                    struct btrfs_root *root,
7266                                    struct btrfs_path *path,
7267                                    struct walk_control *wc, int lookup_info)
7268 {
7269         int level = wc->level;
7270         struct extent_buffer *eb = path->nodes[level];
7271         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7272         int ret;
7273
7274         if (wc->stage == UPDATE_BACKREF &&
7275             btrfs_header_owner(eb) != root->root_key.objectid)
7276                 return 1;
7277
7278         /*
7279          * when the reference count of a tree block is 1, it won't increase
7280          * again. once the full backref flag is set, we never clear it.
7281          */
7282         if (lookup_info &&
7283             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
7284              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
7285                 BUG_ON(!path->locks[level]);
7286                 ret = btrfs_lookup_extent_info(trans, root,
7287                                                eb->start, level, 1,
7288                                                &wc->refs[level],
7289                                                &wc->flags[level]);
7290                 BUG_ON(ret == -ENOMEM);
7291                 if (ret)
7292                         return ret;
7293                 BUG_ON(wc->refs[level] == 0);
7294         }
7295
7296         if (wc->stage == DROP_REFERENCE) {
7297                 if (wc->refs[level] > 1)
7298                         return 1;
7299
7300                 if (path->locks[level] && !wc->keep_locks) {
7301                         btrfs_tree_unlock_rw(eb, path->locks[level]);
7302                         path->locks[level] = 0;
7303                 }
7304                 return 0;
7305         }
7306
7307         /* wc->stage == UPDATE_BACKREF */
7308         if (!(wc->flags[level] & flag)) {
7309                 BUG_ON(!path->locks[level]);
7310                 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
7311                 BUG_ON(ret); /* -ENOMEM */
7312                 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
7313                 BUG_ON(ret); /* -ENOMEM */
7314                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
7315                                                   eb->len, flag,
7316                                                   btrfs_header_level(eb), 0);
7317                 BUG_ON(ret); /* -ENOMEM */
7318                 wc->flags[level] |= flag;
7319         }
7320
7321         /*
7322          * the block is shared by multiple trees, so it's not good to
7323          * keep the tree lock
7324          */
7325         if (path->locks[level] && level > 0) {
7326                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7327                 path->locks[level] = 0;
7328         }
7329         return 0;
7330 }
7331
7332 /*
7333  * helper to process tree block pointer.
7334  *
7335  * when wc->stage == DROP_REFERENCE, this function checks
7336  * the reference count of the block pointed to. if the block
7337  * is shared and we need to update back refs for the subtree
7338  * rooted at the block, this function changes wc->stage to
7339  * UPDATE_BACKREF. if the block is shared and there is no
7340  * need to update backrefs, this function drops the reference
7341  * to the block.
7342  *
7343  * NOTE: return value 1 means we should stop walking down.
7344  */
7345 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
7346                                  struct btrfs_root *root,
7347                                  struct btrfs_path *path,
7348                                  struct walk_control *wc, int *lookup_info)
7349 {
7350         u64 bytenr;
7351         u64 generation;
7352         u64 parent;
7353         u32 blocksize;
7354         struct btrfs_key key;
7355         struct extent_buffer *next;
7356         int level = wc->level;
7357         int reada = 0;
7358         int ret = 0;
7359
7360         generation = btrfs_node_ptr_generation(path->nodes[level],
7361                                                path->slots[level]);
7362         /*
7363          * if the lower level block was created before the snapshot
7364          * was created, we know there is no need to update back refs
7365          * for the subtree
7366          */
7367         if (wc->stage == UPDATE_BACKREF &&
7368             generation <= root->root_key.offset) {
7369                 *lookup_info = 1;
7370                 return 1;
7371         }
7372
7373         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
7374         blocksize = btrfs_level_size(root, level - 1);
7375
7376         next = btrfs_find_tree_block(root, bytenr, blocksize);
7377         if (!next) {
7378                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
7379                 if (!next)
7380                         return -ENOMEM;
7381                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
7382                                                level - 1);
7383                 reada = 1;
7384         }
7385         btrfs_tree_lock(next);
7386         btrfs_set_lock_blocking(next);
7387
7388         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
7389                                        &wc->refs[level - 1],
7390                                        &wc->flags[level - 1]);
7391         if (ret < 0) {
7392                 btrfs_tree_unlock(next);
7393                 return ret;
7394         }
7395
7396         if (unlikely(wc->refs[level - 1] == 0)) {
7397                 btrfs_err(root->fs_info, "Missing references.");
7398                 BUG();
7399         }
7400         *lookup_info = 0;
7401
7402         if (wc->stage == DROP_REFERENCE) {
7403                 if (wc->refs[level - 1] > 1) {
7404                         if (level == 1 &&
7405                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7406                                 goto skip;
7407
7408                         if (!wc->update_ref ||
7409                             generation <= root->root_key.offset)
7410                                 goto skip;
7411
7412                         btrfs_node_key_to_cpu(path->nodes[level], &key,
7413                                               path->slots[level]);
7414                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
7415                         if (ret < 0)
7416                                 goto skip;
7417
7418                         wc->stage = UPDATE_BACKREF;
7419                         wc->shared_level = level - 1;
7420                 }
7421         } else {
7422                 if (level == 1 &&
7423                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7424                         goto skip;
7425         }
7426
7427         if (!btrfs_buffer_uptodate(next, generation, 0)) {
7428                 btrfs_tree_unlock(next);
7429                 free_extent_buffer(next);
7430                 next = NULL;
7431                 *lookup_info = 1;
7432         }
7433
7434         if (!next) {
7435                 if (reada && level == 1)
7436                         reada_walk_down(trans, root, wc, path);
7437                 next = read_tree_block(root, bytenr, blocksize, generation);
7438                 if (!next || !extent_buffer_uptodate(next)) {
7439                         free_extent_buffer(next);
7440                         return -EIO;
7441                 }
7442                 btrfs_tree_lock(next);
7443                 btrfs_set_lock_blocking(next);
7444         }
7445
7446         level--;
7447         BUG_ON(level != btrfs_header_level(next));
7448         path->nodes[level] = next;
7449         path->slots[level] = 0;
7450         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7451         wc->level = level;
7452         if (wc->level == 1)
7453                 wc->reada_slot = 0;
7454         return 0;
7455 skip:
7456         wc->refs[level - 1] = 0;
7457         wc->flags[level - 1] = 0;
7458         if (wc->stage == DROP_REFERENCE) {
7459                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
7460                         parent = path->nodes[level]->start;
7461                 } else {
7462                         BUG_ON(root->root_key.objectid !=
7463                                btrfs_header_owner(path->nodes[level]));
7464                         parent = 0;
7465                 }
7466
7467                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
7468                                 root->root_key.objectid, level - 1, 0, 0);
7469                 BUG_ON(ret); /* -ENOMEM */
7470         }
7471         btrfs_tree_unlock(next);
7472         free_extent_buffer(next);
7473         *lookup_info = 1;
7474         return 1;
7475 }
7476
7477 /*
7478  * helper to process tree block while walking up the tree.
7479  *
7480  * when wc->stage == DROP_REFERENCE, this function drops
7481  * reference count on the block.
7482  *
7483  * when wc->stage == UPDATE_BACKREF, this function changes
7484  * wc->stage back to DROP_REFERENCE if we changed wc->stage
7485  * to UPDATE_BACKREF previously while processing the block.
7486  *
7487  * NOTE: return value 1 means we should stop walking up.
7488  */
7489 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
7490                                  struct btrfs_root *root,
7491                                  struct btrfs_path *path,
7492                                  struct walk_control *wc)
7493 {
7494         int ret;
7495         int level = wc->level;
7496         struct extent_buffer *eb = path->nodes[level];
7497         u64 parent = 0;
7498
7499         if (wc->stage == UPDATE_BACKREF) {
7500                 BUG_ON(wc->shared_level < level);
7501                 if (level < wc->shared_level)
7502                         goto out;
7503
7504                 ret = find_next_key(path, level + 1, &wc->update_progress);
7505                 if (ret > 0)
7506                         wc->update_ref = 0;
7507
7508                 wc->stage = DROP_REFERENCE;
7509                 wc->shared_level = -1;
7510                 path->slots[level] = 0;
7511
7512                 /*
7513                  * check reference count again if the block isn't locked.
7514                  * we should start walking down the tree again if reference
7515                  * count is one.
7516                  */
7517                 if (!path->locks[level]) {
7518                         BUG_ON(level == 0);
7519                         btrfs_tree_lock(eb);
7520                         btrfs_set_lock_blocking(eb);
7521                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7522
7523                         ret = btrfs_lookup_extent_info(trans, root,
7524                                                        eb->start, level, 1,
7525                                                        &wc->refs[level],
7526                                                        &wc->flags[level]);
7527                         if (ret < 0) {
7528                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7529                                 path->locks[level] = 0;
7530                                 return ret;
7531                         }
7532                         BUG_ON(wc->refs[level] == 0);
7533                         if (wc->refs[level] == 1) {
7534                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7535                                 path->locks[level] = 0;
7536                                 return 1;
7537                         }
7538                 }
7539         }
7540
7541         /* wc->stage == DROP_REFERENCE */
7542         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
7543
7544         if (wc->refs[level] == 1) {
7545                 if (level == 0) {
7546                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7547                                 ret = btrfs_dec_ref(trans, root, eb, 1,
7548                                                     wc->for_reloc);
7549                         else
7550                                 ret = btrfs_dec_ref(trans, root, eb, 0,
7551                                                     wc->for_reloc);
7552                         BUG_ON(ret); /* -ENOMEM */
7553                 }
7554                 /* make block locked assertion in clean_tree_block happy */
7555                 if (!path->locks[level] &&
7556                     btrfs_header_generation(eb) == trans->transid) {
7557                         btrfs_tree_lock(eb);
7558                         btrfs_set_lock_blocking(eb);
7559                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7560                 }
7561                 clean_tree_block(trans, root, eb);
7562         }
7563
7564         if (eb == root->node) {
7565                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7566                         parent = eb->start;
7567                 else
7568                         BUG_ON(root->root_key.objectid !=
7569                                btrfs_header_owner(eb));
7570         } else {
7571                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7572                         parent = path->nodes[level + 1]->start;
7573                 else
7574                         BUG_ON(root->root_key.objectid !=
7575                                btrfs_header_owner(path->nodes[level + 1]));
7576         }
7577
7578         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
7579 out:
7580         wc->refs[level] = 0;
7581         wc->flags[level] = 0;
7582         return 0;
7583 }
7584
7585 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
7586                                    struct btrfs_root *root,
7587                                    struct btrfs_path *path,
7588                                    struct walk_control *wc)
7589 {
7590         int level = wc->level;
7591         int lookup_info = 1;
7592         int ret;
7593
7594         while (level >= 0) {
7595                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
7596                 if (ret > 0)
7597                         break;
7598
7599                 if (level == 0)
7600                         break;
7601
7602                 if (path->slots[level] >=
7603                     btrfs_header_nritems(path->nodes[level]))
7604                         break;
7605
7606                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
7607                 if (ret > 0) {
7608                         path->slots[level]++;
7609                         continue;
7610                 } else if (ret < 0)
7611                         return ret;
7612                 level = wc->level;
7613         }
7614         return 0;
7615 }
7616
7617 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
7618                                  struct btrfs_root *root,
7619                                  struct btrfs_path *path,
7620                                  struct walk_control *wc, int max_level)
7621 {
7622         int level = wc->level;
7623         int ret;
7624
7625         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
7626         while (level < max_level && path->nodes[level]) {
7627                 wc->level = level;
7628                 if (path->slots[level] + 1 <
7629                     btrfs_header_nritems(path->nodes[level])) {
7630                         path->slots[level]++;
7631                         return 0;
7632                 } else {
7633                         ret = walk_up_proc(trans, root, path, wc);
7634                         if (ret > 0)
7635                                 return 0;
7636
7637                         if (path->locks[level]) {
7638                                 btrfs_tree_unlock_rw(path->nodes[level],
7639                                                      path->locks[level]);
7640                                 path->locks[level] = 0;
7641                         }
7642                         free_extent_buffer(path->nodes[level]);
7643                         path->nodes[level] = NULL;
7644                         level++;
7645                 }
7646         }
7647         return 1;
7648 }
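
/*
 * Editor's sketch (not in the original source) of how the two helpers
 * above are driven, mirroring the loop in btrfs_drop_snapshot() below:
 *
 *     while (1) {
 *             ret = walk_down_tree(trans, root, path, wc);
 *             if (ret < 0)
 *                     break;  (error)
 *             ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
 *             if (ret)
 *                     break;  (< 0 error, > 0 tree fully walked)
 *     }
 *
 * walk_down_tree() descends until it hits a leaf or a block that must
 * not be entered; walk_up_tree() frees finished blocks and either
 * advances to the next slot (returns 0) or runs out of levels
 * (returns 1).
 */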
7649
7650 /*
7651  * drop a subvolume tree.
7652  *
7653  * this function traverses the tree freeing any blocks that are only
7654  * referenced by the tree.
7655  *
7656  * when a shared tree block is found, this function decreases its
7657  * reference count by one. if update_ref is true, this function
7658  * also makes sure backrefs for the shared block and all lower level
7659  * blocks are properly updated.
7660  *
7661  * If called with for_reloc == 0, may exit early with -EAGAIN
7662  */
7663 int btrfs_drop_snapshot(struct btrfs_root *root,
7664                          struct btrfs_block_rsv *block_rsv, int update_ref,
7665                          int for_reloc)
7666 {
7667         struct btrfs_path *path;
7668         struct btrfs_trans_handle *trans;
7669         struct btrfs_root *tree_root = root->fs_info->tree_root;
7670         struct btrfs_root_item *root_item = &root->root_item;
7671         struct walk_control *wc;
7672         struct btrfs_key key;
7673         int err = 0;
7674         int ret;
7675         int level;
7676         bool root_dropped = false;
7677
7678         path = btrfs_alloc_path();
7679         if (!path) {
7680                 err = -ENOMEM;
7681                 goto out;
7682         }
7683
7684         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7685         if (!wc) {
7686                 btrfs_free_path(path);
7687                 err = -ENOMEM;
7688                 goto out;
7689         }
7690
7691         trans = btrfs_start_transaction(tree_root, 0);
7692         if (IS_ERR(trans)) {
7693                 err = PTR_ERR(trans);
7694                 goto out_free;
7695         }
7696
7697         if (block_rsv)
7698                 trans->block_rsv = block_rsv;
7699
7700         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
7701                 level = btrfs_header_level(root->node);
7702                 path->nodes[level] = btrfs_lock_root_node(root);
7703                 btrfs_set_lock_blocking(path->nodes[level]);
7704                 path->slots[level] = 0;
7705                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7706                 memset(&wc->update_progress, 0,
7707                        sizeof(wc->update_progress));
7708         } else {
7709                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
7710                 memcpy(&wc->update_progress, &key,
7711                        sizeof(wc->update_progress));
7712
7713                 level = root_item->drop_level;
7714                 BUG_ON(level == 0);
7715                 path->lowest_level = level;
7716                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7717                 path->lowest_level = 0;
7718                 if (ret < 0) {
7719                         err = ret;
7720                         goto out_end_trans;
7721                 }
7722                 WARN_ON(ret > 0);
7723
7724                 /*
7725                  * unlock our path, this is safe because only this
7726                  * function is allowed to delete this snapshot
7727                  */
7728                 btrfs_unlock_up_safe(path, 0);
7729
7730                 level = btrfs_header_level(root->node);
7731                 while (1) {
7732                         btrfs_tree_lock(path->nodes[level]);
7733                         btrfs_set_lock_blocking(path->nodes[level]);
7734                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7735
7736                         ret = btrfs_lookup_extent_info(trans, root,
7737                                                 path->nodes[level]->start,
7738                                                 level, 1, &wc->refs[level],
7739                                                 &wc->flags[level]);
7740                         if (ret < 0) {
7741                                 err = ret;
7742                                 goto out_end_trans;
7743                         }
7744                         BUG_ON(wc->refs[level] == 0);
7745
7746                         if (level == root_item->drop_level)
7747                                 break;
7748
7749                         btrfs_tree_unlock(path->nodes[level]);
7750                         path->locks[level] = 0;
7751                         WARN_ON(wc->refs[level] != 1);
7752                         level--;
7753                 }
7754         }
7755
7756         wc->level = level;
7757         wc->shared_level = -1;
7758         wc->stage = DROP_REFERENCE;
7759         wc->update_ref = update_ref;
7760         wc->keep_locks = 0;
7761         wc->for_reloc = for_reloc;
7762         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7763
7764         while (1) {
7766                 ret = walk_down_tree(trans, root, path, wc);
7767                 if (ret < 0) {
7768                         err = ret;
7769                         break;
7770                 }
7771
7772                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
7773                 if (ret < 0) {
7774                         err = ret;
7775                         break;
7776                 }
7777
7778                 if (ret > 0) {
7779                         BUG_ON(wc->stage != DROP_REFERENCE);
7780                         break;
7781                 }
7782
7783                 if (wc->stage == DROP_REFERENCE) {
7784                         level = wc->level;
7785                         btrfs_node_key(path->nodes[level],
7786                                        &root_item->drop_progress,
7787                                        path->slots[level]);
7788                         root_item->drop_level = level;
7789                 }
7790
7791                 BUG_ON(wc->level == 0);
7792                 if (btrfs_should_end_transaction(trans, tree_root) ||
7793                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
7794                         ret = btrfs_update_root(trans, tree_root,
7795                                                 &root->root_key,
7796                                                 root_item);
7797                         if (ret) {
7798                                 btrfs_abort_transaction(trans, tree_root, ret);
7799                                 err = ret;
7800                                 goto out_end_trans;
7801                         }
7802
7803                         btrfs_end_transaction_throttle(trans, tree_root);
7804                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
7805                                 pr_debug("BTRFS: drop snapshot early exit\n");
7806                                 err = -EAGAIN;
7807                                 goto out_free;
7808                         }
7809
7810                         trans = btrfs_start_transaction(tree_root, 0);
7811                         if (IS_ERR(trans)) {
7812                                 err = PTR_ERR(trans);
7813                                 goto out_free;
7814                         }
7815                         if (block_rsv)
7816                                 trans->block_rsv = block_rsv;
7817                 }
7818         }
7819         btrfs_release_path(path);
7820         if (err)
7821                 goto out_end_trans;
7822
7823         ret = btrfs_del_root(trans, tree_root, &root->root_key);
7824         if (ret) {
7825                 btrfs_abort_transaction(trans, tree_root, ret);
7826                 goto out_end_trans;
7827         }
7828
7829         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
7830                 ret = btrfs_find_root(tree_root, &root->root_key, path,
7831                                       NULL, NULL);
7832                 if (ret < 0) {
7833                         btrfs_abort_transaction(trans, tree_root, ret);
7834                         err = ret;
7835                         goto out_end_trans;
7836                 } else if (ret > 0) {
7837                         /* if we fail to delete the orphan item this time
7838                          * around, it'll get picked up the next time.
7839                          *
7840                          * The most common failure here is just -ENOENT.
7841                          */
7842                         btrfs_del_orphan_item(trans, tree_root,
7843                                               root->root_key.objectid);
7844                 }
7845         }
7846
7847         if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
7848                 btrfs_drop_and_free_fs_root(tree_root->fs_info, root);
7849         } else {
7850                 free_extent_buffer(root->node);
7851                 free_extent_buffer(root->commit_root);
7852                 btrfs_put_fs_root(root);
7853         }
7854         root_dropped = true;
7855 out_end_trans:
7856         btrfs_end_transaction_throttle(trans, tree_root);
7857 out_free:
7858         kfree(wc);
7859         btrfs_free_path(path);
7860 out:
7861         /*
7862          * So if we need to stop dropping the snapshot for whatever reason, we
7863          * need to make sure to add it back to the dead root list so that we
7864          * keep trying to do the work later.  This also cleans up roots if we
7865          * don't have them in the radix (like when we recover after a power fail
7866          * or unmount) so we don't leak memory.
7867          */
7868         if (!for_reloc && root_dropped == false)
7869                 btrfs_add_dead_root(root);
7870         if (err && err != -EAGAIN)
7871                 btrfs_std_error(root->fs_info, err);
7872         return err;
7873 }
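
/*
 * Editor's usage note (not in the original source): the cleaner thread
 * is the typical caller of this function, roughly:
 *
 *     root = <taken off fs_info->dead_roots>;
 *     btrfs_drop_snapshot(root, NULL, 0, 0);
 *
 * With for_reloc == 0 the call may bail out with -EAGAIN after having
 * committed drop_progress, in which case the root is put back on the
 * dead root list above and the drop resumes on a later cleaner pass.
 */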
7874
7875 /*
7876  * drop subtree rooted at tree block 'node'.
7877  *
7878  * NOTE: this function will unlock and release tree block 'node'.
7879  * it is only used by relocation code.
7880  */
7881 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
7882                         struct btrfs_root *root,
7883                         struct extent_buffer *node,
7884                         struct extent_buffer *parent)
7885 {
7886         struct btrfs_path *path;
7887         struct walk_control *wc;
7888         int level;
7889         int parent_level;
7890         int ret = 0;
7891         int wret;
7892
7893         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7894
7895         path = btrfs_alloc_path();
7896         if (!path)
7897                 return -ENOMEM;
7898
7899         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7900         if (!wc) {
7901                 btrfs_free_path(path);
7902                 return -ENOMEM;
7903         }
7904
7905         btrfs_assert_tree_locked(parent);
7906         parent_level = btrfs_header_level(parent);
7907         extent_buffer_get(parent);
7908         path->nodes[parent_level] = parent;
7909         path->slots[parent_level] = btrfs_header_nritems(parent);
7910
7911         btrfs_assert_tree_locked(node);
7912         level = btrfs_header_level(node);
7913         path->nodes[level] = node;
7914         path->slots[level] = 0;
7915         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7916
7917         wc->refs[parent_level] = 1;
7918         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7919         wc->level = level;
7920         wc->shared_level = -1;
7921         wc->stage = DROP_REFERENCE;
7922         wc->update_ref = 0;
7923         wc->keep_locks = 1;
7924         wc->for_reloc = 1;
7925         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7926
7927         while (1) {
7928                 wret = walk_down_tree(trans, root, path, wc);
7929                 if (wret < 0) {
7930                         ret = wret;
7931                         break;
7932                 }
7933
7934                 wret = walk_up_tree(trans, root, path, wc, parent_level);
7935                 if (wret < 0)
7936                         ret = wret;
7937                 if (wret != 0)
7938                         break;
7939         }
7940
7941         kfree(wc);
7942         btrfs_free_path(path);
7943         return ret;
7944 }
7945
7946 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7947 {
7948         u64 num_devices;
7949         u64 stripped;
7950
7951         /*
7952          * if restripe for this chunk_type is on, pick the target profile and
7953          * return, otherwise do the usual balance
7954          */
7955         stripped = get_restripe_target(root->fs_info, flags);
7956         if (stripped)
7957                 return extended_to_chunk(stripped);
7958
7959         /*
7960          * we add in the count of missing devices because we want
7961          * to make sure that any RAID levels on a degraded FS
7962          * continue to be honored.
7963          */
7964         num_devices = root->fs_info->fs_devices->rw_devices +
7965                 root->fs_info->fs_devices->missing_devices;
7966
7967         stripped = BTRFS_BLOCK_GROUP_RAID0 |
7968                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
7969                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7970
7971         if (num_devices == 1) {
7972                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7973                 stripped = flags & ~stripped;
7974
7975                 /* turn raid0 into single device chunks */
7976                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7977                         return stripped;
7978
7979                 /* turn mirroring into duplication */
7980                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7981                              BTRFS_BLOCK_GROUP_RAID10))
7982                         return stripped | BTRFS_BLOCK_GROUP_DUP;
7983         } else {
7984                 /* they already had raid on here, just return */
7985                 if (flags & stripped)
7986                         return flags;
7987
7988                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7989                 stripped = flags & ~stripped;
7990
7991                 /* switch duplicated blocks with raid1 */
7992                 if (flags & BTRFS_BLOCK_GROUP_DUP)
7993                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
7994
7995                 /* this is drive concat, leave it alone */
7996         }
7997
7998         return flags;
7999 }
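
/*
 * Example (editor's sketch, not part of the original source; the
 * example_ function is hypothetical): the single-device downgrade
 * above is plain bit arithmetic.  The type bits (DATA/METADATA/SYSTEM)
 * survive; the striping/mirroring bits are replaced.
 */
#if 0  /* illustration only */
static u64 example_single_device_downgrade(u64 flags)
{
        u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
                BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
                BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10 |
                BTRFS_BLOCK_GROUP_DUP;
        /* keep only the type bits */
        u64 base = flags & ~stripped;

        /* mirroring across devices becomes duplication on one device */
        if (flags & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
                return base | BTRFS_BLOCK_GROUP_DUP;

        /* raid0 and anything else fall back to single */
        return base;
}
#endif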
8000
8001 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
8002 {
8003         struct btrfs_space_info *sinfo = cache->space_info;
8004         u64 num_bytes;
8005         u64 min_allocable_bytes;
8006         int ret = -ENOSPC;
8007
8008
8009         /*
8010          * We need some metadata space and system metadata space for
8011          * allocating chunks in some corner cases, so keep a minimum
8012          * reserve unless the caller forces the group read-only.
8013          */
8014         if ((sinfo->flags &
8015              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
8016             !force)
8017                 min_allocable_bytes = 1 * 1024 * 1024;
8018         else
8019                 min_allocable_bytes = 0;
8020
8021         spin_lock(&sinfo->lock);
8022         spin_lock(&cache->lock);
8023
8024         if (cache->ro) {
8025                 ret = 0;
8026                 goto out;
8027         }
8028
8029         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8030                     cache->bytes_super - btrfs_block_group_used(&cache->item);
8031
8032         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
8033             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
8034             min_allocable_bytes <= sinfo->total_bytes) {
8035                 sinfo->bytes_readonly += num_bytes;
8036                 cache->ro = 1;
8037                 ret = 0;
8038         }
8039 out:
8040         spin_unlock(&cache->lock);
8041         spin_unlock(&sinfo->lock);
8042         return ret;
8043 }
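
/*
 * Example (editor's sketch, not part of the original source; the
 * example_ function is hypothetical): the read-only check above in
 * isolation.  A group may go read-only when everything already
 * accounted in the space_info, plus the group's unused bytes (which
 * would become read-only) and the minimum reserve, still fits:
 */
#if 0  /* illustration only */
static int example_can_set_ro(struct btrfs_space_info *sinfo,
                              u64 unused_in_group, u64 min_allocable)
{
        return sinfo->bytes_used + sinfo->bytes_reserved +
               sinfo->bytes_pinned + sinfo->bytes_may_use +
               sinfo->bytes_readonly + unused_in_group +
               min_allocable <= sinfo->total_bytes;
}
#endif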
8044
8045 int btrfs_set_block_group_ro(struct btrfs_root *root,
8046                              struct btrfs_block_group_cache *cache)
8048 {
8049         struct btrfs_trans_handle *trans;
8050         u64 alloc_flags;
8051         int ret;
8052
8053         BUG_ON(cache->ro);
8054
8055         trans = btrfs_join_transaction(root);
8056         if (IS_ERR(trans))
8057                 return PTR_ERR(trans);
8058
8059         alloc_flags = update_block_group_flags(root, cache->flags);
8060         if (alloc_flags != cache->flags) {
8061                 ret = do_chunk_alloc(trans, root, alloc_flags,
8062                                      CHUNK_ALLOC_FORCE);
8063                 if (ret < 0)
8064                         goto out;
8065         }
8066
8067         ret = set_block_group_ro(cache, 0);
8068         if (!ret)
8069                 goto out;
8070         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
8071         ret = do_chunk_alloc(trans, root, alloc_flags,
8072                              CHUNK_ALLOC_FORCE);
8073         if (ret < 0)
8074                 goto out;
8075         ret = set_block_group_ro(cache, 0);
8076 out:
8077         btrfs_end_transaction(trans, root);
8078         return ret;
8079 }
8080
8081 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
8082                             struct btrfs_root *root, u64 type)
8083 {
8084         u64 alloc_flags = get_alloc_profile(root, type);
8085         return do_chunk_alloc(trans, root, alloc_flags,
8086                               CHUNK_ALLOC_FORCE);
8087 }
8088
8089 /*
8090  * helper to account the unused space of all the readonly block groups in
8091  * the list. Takes mirrors into account.
8092  */
8093 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
8094 {
8095         struct btrfs_block_group_cache *block_group;
8096         u64 free_bytes = 0;
8097         int factor;
8098
8099         list_for_each_entry(block_group, groups_list, list) {
8100                 spin_lock(&block_group->lock);
8101
8102                 if (!block_group->ro) {
8103                         spin_unlock(&block_group->lock);
8104                         continue;
8105                 }
8106
8107                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
8108                                           BTRFS_BLOCK_GROUP_RAID10 |
8109                                           BTRFS_BLOCK_GROUP_DUP))
8110                         factor = 2;
8111                 else
8112                         factor = 1;
8113
8114                 free_bytes += (block_group->key.offset -
8115                                btrfs_block_group_used(&block_group->item)) *
8116                                factor;
8117
8118                 spin_unlock(&block_group->lock);
8119         }
8120
8121         return free_bytes;
8122 }
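
/*
 * Example (editor's sketch, not part of the original source; the
 * example_ function is hypothetical): why the factor matters.  In a
 * 1 GiB RAID1 group with 256 MiB used, each unused logical byte
 * occupies two raw bytes, so (1024 - 256) MiB * 2 = 1536 MiB of raw
 * space would be freed by relocating the group.
 */
#if 0  /* illustration only */
static u64 example_ro_free_bytes(u64 group_size, u64 group_used, int factor)
{
        /* unused logical bytes times the on-disk replication factor */
        return (group_size - group_used) * factor;
}
#endif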
8123
8124 /*
8125  * helper to account the unused space of all the readonly block groups in
8126  * the space_info. Takes mirrors into account.
8127  */
8128 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
8129 {
8130         int i;
8131         u64 free_bytes = 0;
8132
8133         spin_lock(&sinfo->lock);
8134
8135         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
8136                 if (!list_empty(&sinfo->block_groups[i]))
8137                         free_bytes += __btrfs_get_ro_block_group_free_space(
8138                                                 &sinfo->block_groups[i]);
8139
8140         spin_unlock(&sinfo->lock);
8141
8142         return free_bytes;
8143 }
8144
8145 void btrfs_set_block_group_rw(struct btrfs_root *root,
8146                               struct btrfs_block_group_cache *cache)
8147 {
8148         struct btrfs_space_info *sinfo = cache->space_info;
8149         u64 num_bytes;
8150
8151         BUG_ON(!cache->ro);
8152
8153         spin_lock(&sinfo->lock);
8154         spin_lock(&cache->lock);
8155         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8156                     cache->bytes_super - btrfs_block_group_used(&cache->item);
8157         sinfo->bytes_readonly -= num_bytes;
8158         cache->ro = 0;
8159         spin_unlock(&cache->lock);
8160         spin_unlock(&sinfo->lock);
8161 }
8162
8163 /*
8164  * checks to see if it's even possible to relocate this block group.
8165  *
8166  * @return - -1 if it's not a good idea to relocate this block group, 0 if
8167  * it's ok to go ahead and try.
8168  */
8169 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
8170 {
8171         struct btrfs_block_group_cache *block_group;
8172         struct btrfs_space_info *space_info;
8173         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
8174         struct btrfs_device *device;
8175         struct btrfs_trans_handle *trans;
8176         u64 min_free;
8177         u64 dev_min = 1;
8178         u64 dev_nr = 0;
8179         u64 target;
8180         int index;
8181         int full = 0;
8182         int ret = 0;
8183
8184         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
8185
8186         /* odd, couldn't find the block group, leave it alone */
8187         if (!block_group)
8188                 return -1;
8189
8190         min_free = btrfs_block_group_used(&block_group->item);
8191
8192         /* no bytes used, we're good */
8193         if (!min_free)
8194                 goto out;
8195
8196         space_info = block_group->space_info;
8197         spin_lock(&space_info->lock);
8198
8199         full = space_info->full;
8200
8201         /*
8202          * if this is the last block group we have in this space, we can't
8203          * relocate it unless we're able to allocate a new chunk below.
8204          *
8205          * Otherwise, we need to make sure we have room in the space to handle
8206          * all of the extents from this block group.  If we can, we're good.
8207          */
8208         if ((space_info->total_bytes != block_group->key.offset) &&
8209             (space_info->bytes_used + space_info->bytes_reserved +
8210              space_info->bytes_pinned + space_info->bytes_readonly +
8211              min_free < space_info->total_bytes)) {
8212                 spin_unlock(&space_info->lock);
8213                 goto out;
8214         }
8215         spin_unlock(&space_info->lock);
8216
8217         /*
8218          * ok we don't have enough space, but maybe we have free space on our
8219          * devices to allocate new chunks for relocation, so loop through our
8220          * alloc devices and guess if we have enough space.  if this block
8221          * group is going to be restriped, run checks against the target
8222          * profile instead of the current one.
8223          */
8224         ret = -1;
8225
8226         /*
8227          * index:
8228          *      0: raid10
8229          *      1: raid1
8230          *      2: dup
8231          *      3: raid0
8232          *      4: single
8233          */
8234         target = get_restripe_target(root->fs_info, block_group->flags);
8235         if (target) {
8236                 index = __get_raid_index(extended_to_chunk(target));
8237         } else {
8238                 /*
8239                  * this is just a balance, so if we were marked as full
8240                  * we know there is no space for a new chunk
8241                  */
8242                 if (full)
8243                         goto out;
8244
8245                 index = get_block_group_index(block_group);
8246         }
8247
8248         if (index == BTRFS_RAID_RAID10) {
8249                 dev_min = 4;
8250                 /* Divide by 2 */
8251                 min_free >>= 1;
8252         } else if (index == BTRFS_RAID_RAID1) {
8253                 dev_min = 2;
8254         } else if (index == BTRFS_RAID_DUP) {
8255                 /* Multiply by 2 */
8256                 min_free <<= 1;
8257         } else if (index == BTRFS_RAID_RAID0) {
8258                 dev_min = fs_devices->rw_devices;
8259                 do_div(min_free, dev_min);
8260         }
8261
8262         /* We need to do this so that we can look at pending chunks */
8263         trans = btrfs_join_transaction(root);
8264         if (IS_ERR(trans)) {
8265                 ret = PTR_ERR(trans);
8266                 goto out;
8267         }
8268
8269         mutex_lock(&root->fs_info->chunk_mutex);
8270         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
8271                 u64 dev_offset;
8272
8273                 /*
8274                  * check to make sure we can actually find a chunk with enough
8275                  * space to fit our block group in.
8276                  */
8277                 if (device->total_bytes > device->bytes_used + min_free &&
8278                     !device->is_tgtdev_for_dev_replace) {
8279                         ret = find_free_dev_extent(trans, device, min_free,
8280                                                    &dev_offset, NULL);
8281                         if (!ret)
8282                                 dev_nr++;
8283
8284                         if (dev_nr >= dev_min)
8285                                 break;
8286
8287                         ret = -1;
8288                 }
8289         }
8290         mutex_unlock(&root->fs_info->chunk_mutex);
8291         btrfs_end_transaction(trans, root);
8292 out:
8293         btrfs_put_block_group(block_group);
8294         return ret;
8295 }
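
/*
 * Example (editor's sketch, not part of the original source; the
 * example_ function is hypothetical): how the used-byte total above
 * becomes a per-device requirement before the alloc_list scan.
 * RAID10 needs at least 4 devices, each holding half the data; RAID1
 * needs 2 full copies; DUP writes everything twice to one device;
 * RAID0 spreads the data evenly across all rw devices.
 */
#if 0  /* illustration only */
static void example_relocation_requirement(u64 used, int raid_index,
                                           u64 rw_devices,
                                           u64 *min_free, u64 *dev_min)
{
        *min_free = used;
        *dev_min = 1;

        switch (raid_index) {
        case BTRFS_RAID_RAID10:
                *dev_min = 4;
                *min_free >>= 1;
                break;
        case BTRFS_RAID_RAID1:
                *dev_min = 2;
                break;
        case BTRFS_RAID_DUP:
                *min_free <<= 1;
                break;
        case BTRFS_RAID_RAID0:
                *dev_min = rw_devices;
                *min_free /= rw_devices;        /* do_div() in the kernel */
                break;
        }
}
#endif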
8296
8297 static int find_first_block_group(struct btrfs_root *root,
8298                 struct btrfs_path *path, struct btrfs_key *key)
8299 {
8300         int ret = 0;
8301         struct btrfs_key found_key;
8302         struct extent_buffer *leaf;
8303         int slot;
8304
8305         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
8306         if (ret < 0)
8307                 goto out;
8308
8309         while (1) {
8310                 slot = path->slots[0];
8311                 leaf = path->nodes[0];
8312                 if (slot >= btrfs_header_nritems(leaf)) {
8313                         ret = btrfs_next_leaf(root, path);
8314                         if (ret == 0)
8315                                 continue;
8316                         if (ret < 0)
8317                                 goto out;
8318                         break;
8319                 }
8320                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
8321
8322                 if (found_key.objectid >= key->objectid &&
8323                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
8324                         ret = 0;
8325                         goto out;
8326                 }
8327                 path->slots[0]++;
8328         }
8329 out:
8330         return ret;
8331 }
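
/*
 * Example (editor's sketch, not part of the original source; the
 * example_ function is hypothetical): the search-and-walk idiom used
 * above in its generic form.  btrfs_search_slot() positions the path
 * at (or just past) the key; btrfs_next_leaf() advances once the slot
 * runs off the current leaf.
 */
#if 0  /* illustration only */
static int example_walk_items(struct btrfs_root *root,
                              struct btrfs_path *path,
                              struct btrfs_key *key)
{
        struct btrfs_key found_key;
        struct extent_buffer *leaf;
        int ret;

        ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
        while (ret == 0) {
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        /* > 0 from btrfs_next_leaf() means no more leaves */
                        ret = btrfs_next_leaf(root, path);
                        continue;
                }
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                /* ... consume the item, or break when past the range ... */
                path->slots[0]++;
        }
        return ret;
}
#endif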
8332
8333 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
8334 {
8335         struct btrfs_block_group_cache *block_group;
8336         u64 last = 0;
8337
8338         while (1) {
8339                 struct inode *inode;
8340
8341                 block_group = btrfs_lookup_first_block_group(info, last);
8342                 while (block_group) {
8343                         spin_lock(&block_group->lock);
8344                         if (block_group->iref)
8345                                 break;
8346                         spin_unlock(&block_group->lock);
8347                         block_group = next_block_group(info->tree_root,
8348                                                        block_group);
8349                 }
8350                 if (!block_group) {
8351                         if (last == 0)
8352                                 break;
8353                         last = 0;
8354                         continue;
8355                 }
8356
8357                 inode = block_group->inode;
8358                 block_group->iref = 0;
8359                 block_group->inode = NULL;
8360                 spin_unlock(&block_group->lock);
8361                 iput(inode);
8362                 last = block_group->key.objectid + block_group->key.offset;
8363                 btrfs_put_block_group(block_group);
8364         }
8365 }
8366
8367 int btrfs_free_block_groups(struct btrfs_fs_info *info)
8368 {
8369         struct btrfs_block_group_cache *block_group;
8370         struct btrfs_space_info *space_info;
8371         struct btrfs_caching_control *caching_ctl;
8372         struct rb_node *n;
8373
8374         down_write(&info->commit_root_sem);
8375         while (!list_empty(&info->caching_block_groups)) {
8376                 caching_ctl = list_entry(info->caching_block_groups.next,
8377                                          struct btrfs_caching_control, list);
8378                 list_del(&caching_ctl->list);
8379                 put_caching_control(caching_ctl);
8380         }
8381         up_write(&info->commit_root_sem);
8382
8383         spin_lock(&info->block_group_cache_lock);
8384         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
8385                 block_group = rb_entry(n, struct btrfs_block_group_cache,
8386                                        cache_node);
8387                 rb_erase(&block_group->cache_node,
8388                          &info->block_group_cache_tree);
8389                 spin_unlock(&info->block_group_cache_lock);
8390
8391                 down_write(&block_group->space_info->groups_sem);
8392                 list_del(&block_group->list);
8393                 up_write(&block_group->space_info->groups_sem);
8394
8395                 if (block_group->cached == BTRFS_CACHE_STARTED)
8396                         wait_block_group_cache_done(block_group);
8397
8398                 /*
8399                  * We haven't cached this block group, which means we could
8400                  * possibly have excluded extents on this block group.
8401                  */
8402                 if (block_group->cached == BTRFS_CACHE_NO ||
8403                     block_group->cached == BTRFS_CACHE_ERROR)
8404                         free_excluded_extents(info->extent_root, block_group);
8405
8406                 btrfs_remove_free_space_cache(block_group);
8407                 btrfs_put_block_group(block_group);
8408
8409                 spin_lock(&info->block_group_cache_lock);
8410         }
8411         spin_unlock(&info->block_group_cache_lock);
8412
8413         /* now that all the block groups are freed, go through and
8414          * free all the space_info structs.  This is only called during
8415          * the final stages of unmount, and so we know nobody is
8416          * using them.  We call synchronize_rcu() once before we start,
8417          * just to be on the safe side.
8418          */
8419         synchronize_rcu();
8420
8421         release_global_block_rsv(info);
8422
8423         while (!list_empty(&info->space_info)) {
8424                 int i;
8425
8426                 space_info = list_entry(info->space_info.next,
8427                                         struct btrfs_space_info,
8428                                         list);
8429                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
8430                         if (WARN_ON(space_info->bytes_pinned > 0 ||
8431                             space_info->bytes_reserved > 0 ||
8432                             space_info->bytes_may_use > 0)) {
8433                                 dump_space_info(space_info, 0, 0);
8434                         }
8435                 }
8436                 list_del(&space_info->list);
8437                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
8438                         struct kobject *kobj;
8439                         kobj = &space_info->block_group_kobjs[i];
8440                         if (kobj->parent) {
8441                                 kobject_del(kobj);
8442                                 kobject_put(kobj);
8443                         }
8444                 }
8445                 kobject_del(&space_info->kobj);
8446                 kobject_put(&space_info->kobj);
8447         }
8448         return 0;
8449 }
8450
8451 static void __link_block_group(struct btrfs_space_info *space_info,
8452                                struct btrfs_block_group_cache *cache)
8453 {
8454         int index = get_block_group_index(cache);
8455         bool first = false;
8456
8457         down_write(&space_info->groups_sem);
8458         if (list_empty(&space_info->block_groups[index]))
8459                 first = true;
8460         list_add_tail(&cache->list, &space_info->block_groups[index]);
8461         up_write(&space_info->groups_sem);
8462
8463         if (first) {
8464                 struct kobject *kobj = &space_info->block_group_kobjs[index];
8465                 int ret;
8466
8467                 kobject_get(&space_info->kobj); /* put in release */
8468                 ret = kobject_add(kobj, &space_info->kobj, "%s",
8469                                   get_raid_name(index));
8470                 if (ret) {
8471                         pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
8472                         kobject_put(&space_info->kobj);
8473                 }
8474         }
8475 }
8476
8477 static struct btrfs_block_group_cache *
8478 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
8479 {
8480         struct btrfs_block_group_cache *cache;
8481
8482         cache = kzalloc(sizeof(*cache), GFP_NOFS);
8483         if (!cache)
8484                 return NULL;
8485
8486         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8487                                         GFP_NOFS);
8488         if (!cache->free_space_ctl) {
8489                 kfree(cache);
8490                 return NULL;
8491         }
8492
8493         cache->key.objectid = start;
8494         cache->key.offset = size;
8495         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
8496
8497         cache->sectorsize = root->sectorsize;
8498         cache->fs_info = root->fs_info;
8499         cache->full_stripe_len = btrfs_full_stripe_len(root,
8500                                                &root->fs_info->mapping_tree,
8501                                                start);
8502         atomic_set(&cache->count, 1);
8503         spin_lock_init(&cache->lock);
8504         INIT_LIST_HEAD(&cache->list);
8505         INIT_LIST_HEAD(&cache->cluster_list);
8506         INIT_LIST_HEAD(&cache->new_bg_list);
8507         btrfs_init_free_space_ctl(cache);
8508
8509         return cache;
8510 }
8511
8512 int btrfs_read_block_groups(struct btrfs_root *root)
8513 {
8514         struct btrfs_path *path;
8515         int ret;
8516         struct btrfs_block_group_cache *cache;
8517         struct btrfs_fs_info *info = root->fs_info;
8518         struct btrfs_space_info *space_info;
8519         struct btrfs_key key;
8520         struct btrfs_key found_key;
8521         struct extent_buffer *leaf;
8522         int need_clear = 0;
8523         u64 cache_gen;
8524
8525         root = info->extent_root;
8526         key.objectid = 0;
8527         key.offset = 0;
8528         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
8529         path = btrfs_alloc_path();
8530         if (!path)
8531                 return -ENOMEM;
8532         path->reada = 1;
8533
8534         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
8535         if (btrfs_test_opt(root, SPACE_CACHE) &&
8536             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
8537                 need_clear = 1;
8538         if (btrfs_test_opt(root, CLEAR_CACHE))
8539                 need_clear = 1;
8540
8541         while (1) {
8542                 ret = find_first_block_group(root, path, &key);
8543                 if (ret > 0)
8544                         break;
8545                 if (ret != 0)
8546                         goto error;
8547
8548                 leaf = path->nodes[0];
8549                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8550
8551                 cache = btrfs_create_block_group_cache(root, found_key.objectid,
8552                                                        found_key.offset);
8553                 if (!cache) {
8554                         ret = -ENOMEM;
8555                         goto error;
8556                 }
8557
8558                 if (need_clear) {
8559                         /*
8560                          * When we mount with an old space cache, we need
8561                          * to set BTRFS_DC_CLEAR and set the dirty flag.
8562                          *
8563                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
8564                          *    truncate the old free space cache inode and
8565                          *    setup a new one.
8566                          * b) Setting 'dirty flag' makes sure that we flush
8567                          *    the new space cache info onto disk.
8568                          */
8569                         cache->disk_cache_state = BTRFS_DC_CLEAR;
8570                         if (btrfs_test_opt(root, SPACE_CACHE))
8571                                 cache->dirty = 1;
8572                 }
8573
8574                 read_extent_buffer(leaf, &cache->item,
8575                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
8576                                    sizeof(cache->item));
8577                 cache->flags = btrfs_block_group_flags(&cache->item);
8578
8579                 key.objectid = found_key.objectid + found_key.offset;
8580                 btrfs_release_path(path);
8581
8582                 /*
8583                  * We need to exclude the super stripes now so that the space
8584                  * info has super bytes accounted for, otherwise we'll think
8585                  * we have more space than we actually do.
8586                  */
8587                 ret = exclude_super_stripes(root, cache);
8588                 if (ret) {
8589                         /*
8590                          * We may have excluded something, so call this just in
8591                          * case.
8592                          */
8593                         free_excluded_extents(root, cache);
8594                         btrfs_put_block_group(cache);
8595                         goto error;
8596                 }
8597
8598                 /*
8599              * check for two cases: either we are full, and therefore
8600              * don't need to bother with the caching work since we won't
8601              * find any space, or we are empty, and we can just add all
8602              * the space in and be done with it.  This saves us a lot of
8603                  * time, particularly in the full case.
8604                  */
8605                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
8606                         cache->last_byte_to_unpin = (u64)-1;
8607                         cache->cached = BTRFS_CACHE_FINISHED;
8608                         free_excluded_extents(root, cache);
8609                 } else if (btrfs_block_group_used(&cache->item) == 0) {
8610                         cache->last_byte_to_unpin = (u64)-1;
8611                         cache->cached = BTRFS_CACHE_FINISHED;
8612                         add_new_free_space(cache, root->fs_info,
8613                                            found_key.objectid,
8614                                            found_key.objectid +
8615                                            found_key.offset);
8616                         free_excluded_extents(root, cache);
8617                 }
8618
8619                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
8620                 if (ret) {
8621                         btrfs_remove_free_space_cache(cache);
8622                         btrfs_put_block_group(cache);
8623                         goto error;
8624                 }
8625
8626                 ret = update_space_info(info, cache->flags, found_key.offset,
8627                                         btrfs_block_group_used(&cache->item),
8628                                         &space_info);
8629                 if (ret) {
8630                         btrfs_remove_free_space_cache(cache);
8631                         spin_lock(&info->block_group_cache_lock);
8632                         rb_erase(&cache->cache_node,
8633                                  &info->block_group_cache_tree);
8634                         spin_unlock(&info->block_group_cache_lock);
8635                         btrfs_put_block_group(cache);
8636                         goto error;
8637                 }
8638
8639                 cache->space_info = space_info;
8640                 spin_lock(&cache->space_info->lock);
8641                 cache->space_info->bytes_readonly += cache->bytes_super;
8642                 spin_unlock(&cache->space_info->lock);
8643
8644                 __link_block_group(space_info, cache);
8645
8646                 set_avail_alloc_bits(root->fs_info, cache->flags);
8647                 if (btrfs_chunk_readonly(root, cache->key.objectid))
8648                         set_block_group_ro(cache, 1);
8649         }
8650
8651         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
8652                 if (!(get_alloc_profile(root, space_info->flags) &
8653                       (BTRFS_BLOCK_GROUP_RAID10 |
8654                        BTRFS_BLOCK_GROUP_RAID1 |
8655                        BTRFS_BLOCK_GROUP_RAID5 |
8656                        BTRFS_BLOCK_GROUP_RAID6 |
8657                        BTRFS_BLOCK_GROUP_DUP)))
8658                         continue;
8659                 /*
8660                  * avoid allocating from un-mirrored block group if there are
8661                  * mirrored block groups.
8662                  */
8663                 list_for_each_entry(cache,
8664                                 &space_info->block_groups[BTRFS_RAID_RAID0],
8665                                 list)
8666                         set_block_group_ro(cache, 1);
8667                 list_for_each_entry(cache,
8668                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
8669                                 list)
8670                         set_block_group_ro(cache, 1);
8671         }
8672
8673         init_global_block_rsv(info);
8674         ret = 0;
8675 error:
8676         btrfs_free_path(path);
8677         return ret;
8678 }
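
/*
 * Example (editor's sketch, not part of the original source; the
 * example_ function is hypothetical): the caching shortcut above as a
 * predicate.  Completely full and completely empty groups are marked
 * BTRFS_CACHE_FINISHED immediately and never need the background
 * caching thread.
 */
#if 0  /* illustration only */
static int example_needs_caching_thread(u64 group_size, u64 group_used)
{
        return group_used != 0 && group_used != group_size;
}
#endif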
8679
8680 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
8681                                        struct btrfs_root *root)
8682 {
8683         struct btrfs_block_group_cache *block_group, *tmp;
8684         struct btrfs_root *extent_root = root->fs_info->extent_root;
8685         struct btrfs_block_group_item item;
8686         struct btrfs_key key;
8687         int ret = 0;
8688
8689         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
8690                                  new_bg_list) {
8691                 list_del_init(&block_group->new_bg_list);
8692
8693                 if (ret)
8694                         continue;
8695
8696                 spin_lock(&block_group->lock);
8697                 memcpy(&item, &block_group->item, sizeof(item));
8698                 memcpy(&key, &block_group->key, sizeof(key));
8699                 spin_unlock(&block_group->lock);
8700
8701                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
8702                                         sizeof(item));
8703                 if (ret)
8704                         btrfs_abort_transaction(trans, extent_root, ret);
8705                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
8706                                                key.objectid, key.offset);
8707                 if (ret)
8708                         btrfs_abort_transaction(trans, extent_root, ret);
8709         }
8710 }
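
/*
 * Example (editor's sketch, not part of the original source; the
 * example_ function is hypothetical): the two-phase pattern at work.
 * Chunk allocation only queues the new group on the per-transaction
 * list (see btrfs_make_block_group() below); the on-disk items are
 * then inserted in one batch by the function above when the
 * transaction handle is ended or the transaction commits.
 */
#if 0  /* illustration only */
static void example_queue_new_block_group(struct btrfs_trans_handle *trans,
                                          struct btrfs_block_group_cache *cache)
{
        /* producer side: defer the extent-tree insertion */
        list_add_tail(&cache->new_bg_list, &trans->new_bgs);
}
/* consumer side, run later: btrfs_create_pending_block_groups(trans, root) */
#endif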
8711
8712 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
8713                            struct btrfs_root *root, u64 bytes_used,
8714                            u64 type, u64 chunk_objectid, u64 chunk_offset,
8715                            u64 size)
8716 {
8717         int ret;
8718         struct btrfs_root *extent_root;
8719         struct btrfs_block_group_cache *cache;
8720
8721         extent_root = root->fs_info->extent_root;
8722
8723         btrfs_set_log_full_commit(root->fs_info, trans);
8724
8725         cache = btrfs_create_block_group_cache(root, chunk_offset, size);
8726         if (!cache)
8727                 return -ENOMEM;
8728
8729         btrfs_set_block_group_used(&cache->item, bytes_used);
8730         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
8731         btrfs_set_block_group_flags(&cache->item, type);
8732
8733         cache->flags = type;
8734         cache->last_byte_to_unpin = (u64)-1;
8735         cache->cached = BTRFS_CACHE_FINISHED;
8736         ret = exclude_super_stripes(root, cache);
8737         if (ret) {
8738                 /*
8739                  * We may have excluded something, so call this just in
8740                  * case.
8741                  */
8742                 free_excluded_extents(root, cache);
8743                 btrfs_put_block_group(cache);
8744                 return ret;
8745         }
8746
8747         add_new_free_space(cache, root->fs_info, chunk_offset,
8748                            chunk_offset + size);
8749
8750         free_excluded_extents(root, cache);
8751
8752         ret = btrfs_add_block_group_cache(root->fs_info, cache);
8753         if (ret) {
8754                 btrfs_remove_free_space_cache(cache);
8755                 btrfs_put_block_group(cache);
8756                 return ret;
8757         }
8758
8759         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
8760                                 &cache->space_info);
8761         if (ret) {
8762                 btrfs_remove_free_space_cache(cache);
8763                 spin_lock(&root->fs_info->block_group_cache_lock);
8764                 rb_erase(&cache->cache_node,
8765                          &root->fs_info->block_group_cache_tree);
8766                 spin_unlock(&root->fs_info->block_group_cache_lock);
8767                 btrfs_put_block_group(cache);
8768                 return ret;
8769         }
8770         update_global_block_rsv(root->fs_info);
8771
8772         spin_lock(&cache->space_info->lock);
8773         cache->space_info->bytes_readonly += cache->bytes_super;
8774         spin_unlock(&cache->space_info->lock);
8775
8776         __link_block_group(cache->space_info, cache);
8777
8778         list_add_tail(&cache->new_bg_list, &trans->new_bgs);
8779
8780         set_avail_alloc_bits(extent_root->fs_info, type);
8781
8782         return 0;
8783 }
8784
8785 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
8786 {
8787         u64 extra_flags = chunk_to_extended(flags) &
8788                                 BTRFS_EXTENDED_PROFILE_MASK;
8789
8790         write_seqlock(&fs_info->profiles_lock);
8791         if (flags & BTRFS_BLOCK_GROUP_DATA)
8792                 fs_info->avail_data_alloc_bits &= ~extra_flags;
8793         if (flags & BTRFS_BLOCK_GROUP_METADATA)
8794                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
8795         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
8796                 fs_info->avail_system_alloc_bits &= ~extra_flags;
8797         write_sequnlock(&fs_info->profiles_lock);
8798 }
8799
8800 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
8801                              struct btrfs_root *root, u64 group_start)
8802 {
8803         struct btrfs_path *path;
8804         struct btrfs_block_group_cache *block_group;
8805         struct btrfs_free_cluster *cluster;
8806         struct btrfs_root *tree_root = root->fs_info->tree_root;
8807         struct btrfs_key key;
8808         struct inode *inode;
8809         int ret;
8810         int index;
8811         int factor;
8812
8813         root = root->fs_info->extent_root;
8814
8815         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
8816         BUG_ON(!block_group);
8817         BUG_ON(!block_group->ro);
8818
8819         /*
8820          * Free the reserved super bytes from this block group before
8821          * removing it.
8822          */
8823         free_excluded_extents(root, block_group);
8824
8825         memcpy(&key, &block_group->key, sizeof(key));
8826         index = get_block_group_index(block_group);
8827         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
8828                                   BTRFS_BLOCK_GROUP_RAID1 |
8829                                   BTRFS_BLOCK_GROUP_RAID10))
8830                 factor = 2;
8831         else
8832                 factor = 1;
8833
8834         /* make sure this block group isn't part of an allocation cluster */
8835         cluster = &root->fs_info->data_alloc_cluster;
8836         spin_lock(&cluster->refill_lock);
8837         btrfs_return_cluster_to_free_space(block_group, cluster);
8838         spin_unlock(&cluster->refill_lock);
8839
8840         /*
8841          * make sure this block group isn't part of a metadata
8842          * allocation cluster
8843          */
8844         cluster = &root->fs_info->meta_alloc_cluster;
8845         spin_lock(&cluster->refill_lock);
8846         btrfs_return_cluster_to_free_space(block_group, cluster);
8847         spin_unlock(&cluster->refill_lock);
8848
8849         path = btrfs_alloc_path();
8850         if (!path) {
8851                 ret = -ENOMEM;
8852                 goto out;
8853         }
8854
8855         inode = lookup_free_space_inode(tree_root, block_group, path);
8856         if (!IS_ERR(inode)) {
8857                 ret = btrfs_orphan_add(trans, inode);
8858                 if (ret) {
8859                         btrfs_add_delayed_iput(inode);
8860                         goto out;
8861                 }
8862                 clear_nlink(inode);
8863                 /* One for the block groups ref */
8864                 spin_lock(&block_group->lock);
8865                 if (block_group->iref) {
8866                         block_group->iref = 0;
8867                         block_group->inode = NULL;
8868                         spin_unlock(&block_group->lock);
8869                         iput(inode);
8870                 } else {
8871                         spin_unlock(&block_group->lock);
8872                 }
8873                 /* One for our lookup ref */
8874                 btrfs_add_delayed_iput(inode);
8875         }
8876
8877         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
8878         key.offset = block_group->key.objectid;
8879         key.type = 0;
8880
8881         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
8882         if (ret < 0)
8883                 goto out;
8884         if (ret > 0)
8885                 btrfs_release_path(path);
8886         if (ret == 0) {
8887                 ret = btrfs_del_item(trans, tree_root, path);
8888                 if (ret)
8889                         goto out;
8890                 btrfs_release_path(path);
8891         }
8892
8893         spin_lock(&root->fs_info->block_group_cache_lock);
8894         rb_erase(&block_group->cache_node,
8895                  &root->fs_info->block_group_cache_tree);
8896
8897         if (root->fs_info->first_logical_byte == block_group->key.objectid)
8898                 root->fs_info->first_logical_byte = (u64)-1;
8899         spin_unlock(&root->fs_info->block_group_cache_lock);
8900
8901         down_write(&block_group->space_info->groups_sem);
8902         /*
8903          * we must use list_del_init so people can check to see if they
8904          * are still on the list after taking the semaphore
8905          */
8906         list_del_init(&block_group->list);
8907         if (list_empty(&block_group->space_info->block_groups[index])) {
8908                 kobject_del(&block_group->space_info->block_group_kobjs[index]);
8909                 kobject_put(&block_group->space_info->block_group_kobjs[index]);
8910                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
8911         }
8912         up_write(&block_group->space_info->groups_sem);
8913
8914         if (block_group->cached == BTRFS_CACHE_STARTED)
8915                 wait_block_group_cache_done(block_group);
8916
8917         btrfs_remove_free_space_cache(block_group);
8918
8919         spin_lock(&block_group->space_info->lock);
8920         block_group->space_info->total_bytes -= block_group->key.offset;
8921         block_group->space_info->bytes_readonly -= block_group->key.offset;
8922         block_group->space_info->disk_total -= block_group->key.offset * factor;
8923         spin_unlock(&block_group->space_info->lock);
8924
8925         memcpy(&key, &block_group->key, sizeof(key));
8926
8927         btrfs_clear_space_info_full(root->fs_info);
8928
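         /*
          * Two references remain at this point: the one held by the
          * block group cache rbtree (erased above) and the one taken by
          * btrfs_lookup_block_group() at the top of this function, so
          * both must be dropped.
          */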
8929         btrfs_put_block_group(block_group);
8930         btrfs_put_block_group(block_group);
8931
8932         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
8933         if (ret > 0)
8934                 ret = -EIO;
8935         if (ret < 0)
8936                 goto out;
8937
8938         ret = btrfs_del_item(trans, root, path);
8939 out:
8940         btrfs_free_path(path);
8941         return ret;
8942 }
8943
8944 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
8945 {
8946         struct btrfs_space_info *space_info;
8947         struct btrfs_super_block *disk_super;
8948         u64 features;
8949         u64 flags;
8950         int mixed = 0;
8951         int ret;
8952
8953         disk_super = fs_info->super_copy;
8954         if (!btrfs_super_root(disk_super))
8955                 return 1;
8956
8957         features = btrfs_super_incompat_flags(disk_super);
8958         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
8959                 mixed = 1;
8960
8961         flags = BTRFS_BLOCK_GROUP_SYSTEM;
8962         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8963         if (ret)
8964                 goto out;
8965
8966         if (mixed) {
8967                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
8968                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8969         } else {
8970                 flags = BTRFS_BLOCK_GROUP_METADATA;
8971                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8972                 if (ret)
8973                         goto out;
8974
8975                 flags = BTRFS_BLOCK_GROUP_DATA;
8976                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8977         }
8978 out:
8979         return ret;
8980 }
8981
8982 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
8983 {
8984         return unpin_extent_range(root, start, end);
8985 }
8986
8987 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
8988                                u64 num_bytes, u64 *actual_bytes)
8989 {
8990         return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
8991 }
8992
8993 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
8994 {
8995         struct btrfs_fs_info *fs_info = root->fs_info;
8996         struct btrfs_block_group_cache *cache = NULL;
8997         u64 group_trimmed;
8998         u64 start;
8999         u64 end;
9000         u64 trimmed = 0;
9001         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
9002         int ret = 0;
9003
9004         /*
9005          * try to trim all FS space; our first block group may start at a non-zero offset.
9006          */
9007         if (range->len == total_bytes)
9008                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
9009         else
9010                 cache = btrfs_lookup_block_group(fs_info, range->start);
9011
9012         while (cache) {
9013                 if (cache->key.objectid >= (range->start + range->len)) {
9014                         btrfs_put_block_group(cache);
9015                         break;
9016                 }
9017
9018                 start = max(range->start, cache->key.objectid);
9019                 end = min(range->start + range->len,
9020                                 cache->key.objectid + cache->key.offset);
9021
9022                 if (end - start >= range->minlen) {
9023                         if (!block_group_cache_done(cache)) {
9024                                 ret = cache_block_group(cache, 0);
9025                                 if (ret) {
9026                                         btrfs_put_block_group(cache);
9027                                         break;
9028                                 }
9029                                 ret = wait_block_group_cache_done(cache);
9030                                 if (ret) {
9031                                         btrfs_put_block_group(cache);
9032                                         break;
9033                                 }
9034                         }
9035                         ret = btrfs_trim_block_group(cache,
9036                                                      &group_trimmed,
9037                                                      start,
9038                                                      end,
9039                                                      range->minlen);
9040
9041                         trimmed += group_trimmed;
9042                         if (ret) {
9043                                 btrfs_put_block_group(cache);
9044                                 break;
9045                         }
9046                 }
9047
9048                 cache = next_block_group(fs_info->tree_root, cache);
9049         }
9050
9051         range->len = trimmed;
9052         return ret;
9053 }
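
/*
 * Example (editor's sketch, not part of the original source; the
 * example_ function is hypothetical): from userspace, btrfs_trim_fs()
 * is reached through the generic FITRIM ioctl; struct fstrim_range
 * comes from <linux/fs.h>.  On success the kernel writes the number
 * of bytes actually trimmed back into .len, matching the
 * range->len = trimmed assignment above.
 */
#if 0  /* illustration only, userspace code */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

static int example_trim_whole_fs(const char *mountpoint)
{
        struct fstrim_range range = {
                .start = 0,
                .len = (__u64)-1,       /* clamped to the FS size */
                .minlen = 0,
        };
        int fd = open(mountpoint, O_RDONLY);

        if (fd < 0)
                return -1;
        if (ioctl(fd, FITRIM, &range) < 0) {
                close(fd);
                return -1;
        }
        close(fd);
        return 0;       /* range.len now holds the bytes trimmed */
}
#endif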
9054
9055 /*
9056  * btrfs_{start,end}_nocow_write() are similar to mnt_{want,drop}_write();
9057  * they are used to prevent tasks from writing data into the page cache
9058  * via nocow before the subvolume is snapshotted, and to make sure the
9059  * data is flushed to disk after the snapshot is created.
9060  */
9061 void btrfs_end_nocow_write(struct btrfs_root *root)
9062 {
9063         percpu_counter_dec(&root->subv_writers->counter);
9064         /*
9065          * Make sure counter is updated before we wake up
9066          * waiters.
9067          */
9068         smp_mb();
9069         if (waitqueue_active(&root->subv_writers->wait))
9070                 wake_up(&root->subv_writers->wait);
9071 }
9072
9073 int btrfs_start_nocow_write(struct btrfs_root *root)
9074 {
9075         if (unlikely(atomic_read(&root->will_be_snapshoted)))
9076                 return 0;
9077
9078         percpu_counter_inc(&root->subv_writers->counter);
9079         /*
9080          * Make sure counter is updated before we check for snapshot creation.
9081          */
9082         smp_mb();
9083         if (unlikely(atomic_read(&root->will_be_snapshoted))) {
9084                 btrfs_end_nocow_write(root);
9085                 return 0;
9086         }
9087         return 1;
9088 }
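
/*
 * Example (editor's sketch, not part of the original source; the
 * example_ function is hypothetical): intended pairing of the two
 * helpers above.  A nocow writer brackets its page cache writes; when
 * a snapshot is pending, btrfs_start_nocow_write() returns 0 and the
 * caller must fall back to the normal COW path.
 */
#if 0  /* illustration only */
static int example_nocow_write(struct btrfs_root *root)
{
        if (!btrfs_start_nocow_write(root))
                return -EAGAIN;         /* snapshot pending, use COW */

        /* ... write into the page cache without COW ... */

        btrfs_end_nocow_write(root);
        return 0;
}
#endif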