fs/btrfs/extent-tree.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"
#include "sysfs.h"
#include "qgroup.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * control flags for do_chunk_alloc's force field
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};

static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root, u64 bytenr,
                              u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_delayed_ref_node *node, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve,
                                       int delalloc);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
                     u64 bytenr, u64 num_bytes, int reserved);
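
/*
 * Has caching of this block group completed, either successfully or with
 * an error?  The smp_mb() is presumably paired with the stores that
 * transition cache->cached, so lockless callers see a settled value.
 */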
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED ||
                cache->cached == BTRFS_CACHE_ERROR;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}
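
/*
 * Ranges that must never be handed out as free space (e.g. super block
 * stripes, see exclude_super_stripes() below) are tagged EXTENT_UPTODATE
 * in both freed_extents trees; free_excluded_extents() drops the tags
 * once caching of the block group has finished.
 */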
static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                if (ret)
                        return ret;
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                if (ret)
                        return ret;

                while (nr--) {
                        u64 start, len;

                        if (logical[nr] > cache->key.objectid +
                            cache->key.offset)
                                continue;

                        if (logical[nr] + stripe_len <= cache->key.objectid)
                                continue;

                        start = logical[nr];
                        if (start < cache->key.objectid) {
                                start = cache->key.objectid;
                                len = (logical[nr] + stripe_len) - start;
                        } else {
                                len = min_t(u64, stripe_len,
                                            cache->key.objectid +
                                            cache->key.offset - start);
                        }

                        cache->bytes_super += len;
                        ret = add_excluded_extent(root, start, len);
                        if (ret) {
                                kfree(logical);
                                return ret;
                        }
                }

                kfree(logical);
        }
        return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_root *root,
                                struct btrfs_block_group_cache *block_group)
{
        u64 start = block_group->key.objectid;
        u64 len = block_group->key.offset;
        u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
                root->nodesize : root->sectorsize;
        u64 step = chunk << 1;

        while (len > chunk) {
                btrfs_remove_free_space(block_group, start, chunk);
                start += step;
                if (len < step)
                        len = 0;
                else
                        len -= step;
        }
}
#endif

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents for any extents that can't
 * be used yet, because their free space will only be released when the
 * transaction commits.
 */
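/*
 * Rough example (illustration only): caching the range [0, 100) while a
 * pinned extent covers bytes [10, 20] adds [0, 10) and [21, 100) as free
 * space and returns 89, the total bytes added.
 */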
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,
                                            NULL);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
        }

        return total_added;
}

static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = -ENOMEM;
        bool wakeup = true;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

#ifdef CONFIG_BTRFS_DEBUG
        /*
         * If we're fragmenting we don't want to make anybody think we can
         * allocate from this block group until we've had a chance to fragment
         * the free space.
         */
        if (btrfs_should_fragment_free_space(extent_root, block_group))
                wakeup = false;
#endif
        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->commit_root_sem);

next:
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched() ||
                            rwsem_is_contended(&fs_info->commit_root_sem)) {
                                if (wakeup)
                                        caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->commit_root_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }

                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret < 0)
                                goto err;
                        if (ret)
                                break;
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < last) {
                        key.objectid = last;
                        key.offset = 0;
                        key.type = BTRFS_EXTENT_ITEM_KEY;

                        if (wakeup)
                                caching_ctl->progress = last;
                        btrfs_release_path(path);
                        goto next;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY ||
                    key.type == BTRFS_METADATA_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        if (key.type == BTRFS_METADATA_ITEM_KEY)
                                last = key.objectid +
                                        fs_info->tree_root->nodesize;
                        else
                                last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                if (wakeup)
                                        wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
        if (btrfs_should_fragment_free_space(extent_root, block_group)) {
                u64 bytes_used;

                spin_lock(&block_group->space_info->lock);
                spin_lock(&block_group->lock);
                bytes_used = block_group->key.offset -
                        btrfs_block_group_used(&block_group->item);
                block_group->space_info->bytes_used += bytes_used >> 1;
                spin_unlock(&block_group->lock);
                spin_unlock(&block_group->space_info->lock);
                fragment_free_space(extent_root, block_group);
        }
#endif

        caching_ctl->progress = (u64)-1;
err:
        btrfs_free_path(path);
        up_read(&fs_info->commit_root_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
out:
        if (ret) {
                spin_lock(&block_group->lock);
                block_group->caching_ctl = NULL;
                block_group->cached = BTRFS_CACHE_ERROR;
                spin_unlock(&block_group->lock);
        }
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        if (!caching_ctl)
                return -ENOMEM;

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
                        caching_thread, NULL, NULL);

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but it can happen when one thread
         * starts to load the space cache info, and then some other thread
         * starts a transaction commit which tries to do an allocation while
         * the first thread is still loading the space cache info.  The
         * previous loop should have kept us from choosing this block group,
         * but if we've moved to the state where we will wait on caching
         * block groups we need to first check if we're doing a fast load
         * here, so we can wait for it to finish, otherwise we could end up
         * allocating from a block group whose cache gets evicted for one
         * reason or another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                mutex_lock(&caching_ctl->mutex);
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                        caching_ctl->progress = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                                cache->has_caching_ctl = 1;
                        }
                }
                spin_unlock(&cache->lock);
#ifdef CONFIG_BTRFS_DEBUG
                if (ret == 1 &&
                    btrfs_should_fragment_free_space(fs_info->extent_root,
                                                     cache)) {
                        u64 bytes_used;

                        spin_lock(&cache->space_info->lock);
                        spin_lock(&cache->lock);
                        bytes_used = cache->key.offset -
                                btrfs_block_group_used(&cache->item);
                        cache->space_info->bytes_used += bytes_used >> 1;
                        spin_unlock(&cache->lock);
                        spin_unlock(&cache->space_info->lock);
                        fragment_free_space(fs_info->extent_root, cache);
                }
#endif
                mutex_unlock(&caching_ctl->mutex);

                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wakeup any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                        cache->has_caching_ctl = 1;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->commit_root_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->commit_root_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

        return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}

/*
 * Helper function to lookup the reference count and flags of an extent.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. The head node
 * may also store the extent flags to set. This way you can check to see
 * what the reference count and extent flags will be once all of the
 * delayed refs are processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 offset, int metadata, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        /*
         * If we don't have skinny metadata, don't bother doing anything
         * different
         */
        if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
                offset = root->nodesize;
                metadata = 0;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }

search_again:
        key.objectid = bytenr;
        key.offset = offset;
        if (metadata)
                key.type = BTRFS_METADATA_ITEM_KEY;
        else
                key.type = BTRFS_EXTENT_ITEM_KEY;

        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
                if (path->slots[0]) {
                        path->slots[0]--;
                        btrfs_item_key_to_cpu(path->nodes[0], &key,
                                              path->slots[0]);
                        if (key.objectid == bytenr &&
                            key.type == BTRFS_EXTENT_ITEM_KEY &&
                            key.offset == root->nodesize)
                                ret = 0;
                }
        }

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and try
                         * again
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto search_again;
                }
                spin_lock(&head->lock);
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                spin_unlock(&head->lock);
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. Implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. Full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Full back refs are actually generic, and can
 * be used in all cases where implicit back refs are used. The major
 * shortcoming of full back refs is their overhead. Every time a tree
 * block gets COWed, we have to update the back refs entry for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs and add implicit back refs
 * for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block and increase lower level extents' reference counts. The original
 * implicit back refs are entailed to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block and increase lower level extents' reference counts.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs,
 * and the key offset has different meanings for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, implicit back refs are used.
 * The fields are filled in as:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * consist only of a key. The key offset for the implicit back refs is
 * the objectid of the block's owner tree. The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required. This information is stored in
 * the tree block info structure.
 */
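/*
 * Illustrative example (not part of the original comment): a data extent
 * at bytenr X referenced by file 257 in subvolume 5 at file offset 0
 * carries an implicit back ref keyed as
 *
 *     (X, BTRFS_EXTENT_DATA_REF_KEY, hash_extent_data_ref(5, 257, 0))
 *
 * while the same extent referenced through a shared leaf at bytenr P
 * would instead use the full back ref key
 *
 *     (X, BTRFS_SHARED_DATA_REF_KEY, P)
 *
 * See lookup_extent_data_ref() below for the key construction.
 */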

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0); /* Corruption */
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret); /* Corruption */

        btrfs_extend_item(root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}
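
/*
 * Note that the hash above is a search key, not a unique id: different
 * (root_objectid, owner, offset) tuples may collide.  Lookups re-check
 * the stored fields with match_extent_data_ref(), and inserts step
 * key.offset past colliding items (the -EEXIST loop in
 * insert_extent_data_ref() below).
 */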

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(path);
        return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           int refs_to_drop, int *last_ref)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref1 = NULL;
        struct btrfs_shared_data_ref *ref2 = NULL;
        struct extent_buffer *leaf;
        u32 num_refs = 0;
        int ret = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

        if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                BUG();
        }

        BUG_ON(num_refs < refs_to_drop);
        num_refs -= refs_to_drop;

        if (num_refs == 0) {
                ret = btrfs_del_item(trans, root, path);
                *last_ref = 1;
        } else {
                if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
                        btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
                else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
                        btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                else {
                        struct btrfs_extent_ref_v0 *ref0;
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_extent_ref_v0);
                        btrfs_set_ref_count_v0(leaf, ref0, num_refs);
                }
#endif
                btrfs_mark_buffer_dirty(leaf);
        }
        return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_path *path,
                                          struct btrfs_extent_inline_ref *iref)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_data_ref *ref1;
        struct btrfs_shared_data_ref *ref2;
        u32 num_refs = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        if (iref) {
                if (btrfs_extent_inline_ref_type(leaf, iref) ==
                    BTRFS_EXTENT_DATA_REF_KEY) {
                        ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
                        num_refs = btrfs_extent_data_ref_count(leaf, ref1);
                } else {
                        ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
                        num_refs = btrfs_shared_data_ref_count(leaf, ref2);
                }
        } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                WARN_ON(1);
        }
        return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1442         if (ret > 0)
1443                 ret = -ENOENT;
1444 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1445         if (ret == -ENOENT && parent) {
1446                 btrfs_release_path(path);
1447                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1448                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1449                 if (ret > 0)
1450                         ret = -ENOENT;
1451         }
1452 #endif
1453         return ret;
1454 }
1455
1456 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1457                                           struct btrfs_root *root,
1458                                           struct btrfs_path *path,
1459                                           u64 bytenr, u64 parent,
1460                                           u64 root_objectid)
1461 {
1462         struct btrfs_key key;
1463         int ret;
1464
1465         key.objectid = bytenr;
1466         if (parent) {
1467                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1468                 key.offset = parent;
1469         } else {
1470                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1471                 key.offset = root_objectid;
1472         }
1473
1474         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1475         btrfs_release_path(path);
1476         return ret;
1477 }
1478
1479 static inline int extent_ref_type(u64 parent, u64 owner)
1480 {
1481         int type;
1482         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1483                 if (parent > 0)
1484                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1485                 else
1486                         type = BTRFS_TREE_BLOCK_REF_KEY;
1487         } else {
1488                 if (parent > 0)
1489                         type = BTRFS_SHARED_DATA_REF_KEY;
1490                 else
1491                         type = BTRFS_EXTENT_DATA_REF_KEY;
1492         }
1493         return type;
1494 }
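
/*
 * Editorial summary of the mapping above as an (owner, parent) table.  An
 * owner below BTRFS_FIRST_FREE_OBJECTID denotes a tree block; anything else
 * is file data:
 *
 *	tree block, parent > 0  -> BTRFS_SHARED_BLOCK_REF_KEY
 *	tree block, parent == 0 -> BTRFS_TREE_BLOCK_REF_KEY
 *	file data,  parent > 0  -> BTRFS_SHARED_DATA_REF_KEY
 *	file data,  parent == 0 -> BTRFS_EXTENT_DATA_REF_KEY
 */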
1495
1496 static int find_next_key(struct btrfs_path *path, int level,
1497                          struct btrfs_key *key)
1498
1499 {
1500         for (; level < BTRFS_MAX_LEVEL; level++) {
1501                 if (!path->nodes[level])
1502                         break;
1503                 if (path->slots[level] + 1 >=
1504                     btrfs_header_nritems(path->nodes[level]))
1505                         continue;
1506                 if (level == 0)
1507                         btrfs_item_key_to_cpu(path->nodes[level], key,
1508                                               path->slots[level] + 1);
1509                 else
1510                         btrfs_node_key_to_cpu(path->nodes[level], key,
1511                                               path->slots[level] + 1);
1512                 return 0;
1513         }
1514         return 1;
1515 }
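
/*
 * Editorial usage sketch: find_next_key() climbs the path until some level
 * still has a slot to its right and reports that neighbour's key.
 * lookup_inline_extent_backref() below uses it to peek at the item that
 * follows the extent item without releasing the path, roughly:
 *
 *	struct btrfs_key next;
 *
 *	if (find_next_key(path, 0, &next) == 0 &&
 *	    next.objectid == bytenr &&
 *	    next.type < BTRFS_BLOCK_GROUP_ITEM_KEY)
 *		// a keyed backref item already exists for this extent
 */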
1516
1517 /*
1518  * Look for an inline back ref.  If the back ref is found, *ref_ret is set
1519  * to the address of the inline back ref and 0 is returned.
1520  *
1521  * If the back ref isn't found, *ref_ret is set to the address where it
1522  * should be inserted and -ENOENT is returned.
1523  *
1524  * If insert is true and there are too many inline back refs, the path
1525  * points to the extent item and -EAGAIN is returned.
1526  *
1527  * NOTE: inline back refs are ordered in the same way that back ref
1528  *       items in the tree are ordered.
1529  */
1530 static noinline_for_stack
1531 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1532                                  struct btrfs_root *root,
1533                                  struct btrfs_path *path,
1534                                  struct btrfs_extent_inline_ref **ref_ret,
1535                                  u64 bytenr, u64 num_bytes,
1536                                  u64 parent, u64 root_objectid,
1537                                  u64 owner, u64 offset, int insert)
1538 {
1539         struct btrfs_key key;
1540         struct extent_buffer *leaf;
1541         struct btrfs_extent_item *ei;
1542         struct btrfs_extent_inline_ref *iref;
1543         u64 flags;
1544         u64 item_size;
1545         unsigned long ptr;
1546         unsigned long end;
1547         int extra_size;
1548         int type;
1549         int want;
1550         int ret;
1551         int err = 0;
1552         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1553                                                  SKINNY_METADATA);
1554
1555         key.objectid = bytenr;
1556         key.type = BTRFS_EXTENT_ITEM_KEY;
1557         key.offset = num_bytes;
1558
1559         want = extent_ref_type(parent, owner);
1560         if (insert) {
1561                 extra_size = btrfs_extent_inline_ref_size(want);
1562                 path->keep_locks = 1;
1563         } else
1564                 extra_size = -1;
1565
1566         /*
1567          * For metadata, owner is the level of the block we are interested
1568          * in, so it can be used directly as the skinny metadata key offset.
1569          */
1570         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1571                 key.type = BTRFS_METADATA_ITEM_KEY;
1572                 key.offset = owner;
1573         }
1574
1575 again:
1576         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1577         if (ret < 0) {
1578                 err = ret;
1579                 goto out;
1580         }
1581
1582         /*
1583          * We may be a newly converted file system which still has the old fat
1584          * extent entries for metadata, so try and see if we have one of those.
1585          */
1586         if (ret > 0 && skinny_metadata) {
1587                 skinny_metadata = false;
1588                 if (path->slots[0]) {
1589                         path->slots[0]--;
1590                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1591                                               path->slots[0]);
1592                         if (key.objectid == bytenr &&
1593                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1594                             key.offset == num_bytes)
1595                                 ret = 0;
1596                 }
1597                 if (ret) {
1598                         key.objectid = bytenr;
1599                         key.type = BTRFS_EXTENT_ITEM_KEY;
1600                         key.offset = num_bytes;
1601                         btrfs_release_path(path);
1602                         goto again;
1603                 }
1604         }
1605
1606         if (ret && !insert) {
1607                 err = -ENOENT;
1608                 goto out;
1609         } else if (WARN_ON(ret)) {
1610                 err = -EIO;
1611                 goto out;
1612         }
1613
1614         leaf = path->nodes[0];
1615         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1616 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1617         if (item_size < sizeof(*ei)) {
1618                 if (!insert) {
1619                         err = -ENOENT;
1620                         goto out;
1621                 }
1622                 ret = convert_extent_item_v0(trans, root, path, owner,
1623                                              extra_size);
1624                 if (ret < 0) {
1625                         err = ret;
1626                         goto out;
1627                 }
1628                 leaf = path->nodes[0];
1629                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1630         }
1631 #endif
1632         BUG_ON(item_size < sizeof(*ei));
1633
1634         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1635         flags = btrfs_extent_flags(leaf, ei);
1636
1637         ptr = (unsigned long)(ei + 1);
1638         end = (unsigned long)ei + item_size;
1639
1640         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1641                 ptr += sizeof(struct btrfs_tree_block_info);
1642                 BUG_ON(ptr > end);
1643         }
1644
1645         err = -ENOENT;
1646         while (1) {
1647                 if (ptr >= end) {
1648                         WARN_ON(ptr > end);
1649                         break;
1650                 }
1651                 iref = (struct btrfs_extent_inline_ref *)ptr;
1652                 type = btrfs_extent_inline_ref_type(leaf, iref);
1653                 if (want < type)
1654                         break;
1655                 if (want > type) {
1656                         ptr += btrfs_extent_inline_ref_size(type);
1657                         continue;
1658                 }
1659
1660                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1661                         struct btrfs_extent_data_ref *dref;
1662                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1663                         if (match_extent_data_ref(leaf, dref, root_objectid,
1664                                                   owner, offset)) {
1665                                 err = 0;
1666                                 break;
1667                         }
1668                         if (hash_extent_data_ref_item(leaf, dref) <
1669                             hash_extent_data_ref(root_objectid, owner, offset))
1670                                 break;
1671                 } else {
1672                         u64 ref_offset;
1673                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1674                         if (parent > 0) {
1675                                 if (parent == ref_offset) {
1676                                         err = 0;
1677                                         break;
1678                                 }
1679                                 if (ref_offset < parent)
1680                                         break;
1681                         } else {
1682                                 if (root_objectid == ref_offset) {
1683                                         err = 0;
1684                                         break;
1685                                 }
1686                                 if (ref_offset < root_objectid)
1687                                         break;
1688                         }
1689                 }
1690                 ptr += btrfs_extent_inline_ref_size(type);
1691         }
1692         if (err == -ENOENT && insert) {
1693                 if (item_size + extra_size >=
1694                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1695                         err = -EAGAIN;
1696                         goto out;
1697                 }
1698                 /*
1699                  * To add a new inline back ref, we have to make sure
1700                  * there is no corresponding back ref item.
1701                  * For simplicity, we just do not add a new inline back
1702                  * ref if there is any kind of item for this block.
1703                  */
1704                 if (find_next_key(path, 0, &key) == 0 &&
1705                     key.objectid == bytenr &&
1706                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1707                         err = -EAGAIN;
1708                         goto out;
1709                 }
1710         }
1711         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1712 out:
1713         if (insert) {
1714                 path->keep_locks = 0;
1715                 btrfs_unlock_up_safe(path, 1);
1716         }
1717         return err;
1718 }
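
/*
 * Editorial sketch of the contract documented above, as the callers below
 * (insert_inline_extent_backref() and __btrfs_inc_extent_ref()) consume it:
 *
 *	ret = lookup_inline_extent_backref(trans, root, path, &iref, ...);
 *	if (ret == 0)			// found: modify the ref in place
 *		update_inline_extent_backref(...);
 *	else if (ret == -ENOENT)	// iref marks the insertion point
 *		setup_inline_extent_backref(...);
 *	else if (ret == -EAGAIN)	// no room inline: add a keyed backref
 *		insert_extent_backref(...);
 */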
1719
1720 /*
1721  * helper to add new inline back ref
1722  */
1723 static noinline_for_stack
1724 void setup_inline_extent_backref(struct btrfs_root *root,
1725                                  struct btrfs_path *path,
1726                                  struct btrfs_extent_inline_ref *iref,
1727                                  u64 parent, u64 root_objectid,
1728                                  u64 owner, u64 offset, int refs_to_add,
1729                                  struct btrfs_delayed_extent_op *extent_op)
1730 {
1731         struct extent_buffer *leaf;
1732         struct btrfs_extent_item *ei;
1733         unsigned long ptr;
1734         unsigned long end;
1735         unsigned long item_offset;
1736         u64 refs;
1737         int size;
1738         int type;
1739
1740         leaf = path->nodes[0];
1741         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1742         item_offset = (unsigned long)iref - (unsigned long)ei;
1743
1744         type = extent_ref_type(parent, owner);
1745         size = btrfs_extent_inline_ref_size(type);
1746
1747         btrfs_extend_item(root, path, size);
1748
1749         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1750         refs = btrfs_extent_refs(leaf, ei);
1751         refs += refs_to_add;
1752         btrfs_set_extent_refs(leaf, ei, refs);
1753         if (extent_op)
1754                 __run_delayed_extent_op(extent_op, leaf, ei);
1755
1756         ptr = (unsigned long)ei + item_offset;
1757         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1758         if (ptr < end - size)
1759                 memmove_extent_buffer(leaf, ptr + size, ptr,
1760                                       end - size - ptr);
1761
1762         iref = (struct btrfs_extent_inline_ref *)ptr;
1763         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1764         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1765                 struct btrfs_extent_data_ref *dref;
1766                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1767                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1768                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1769                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1770                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1771         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1772                 struct btrfs_shared_data_ref *sref;
1773                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1774                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1775                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1776         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1777                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1778         } else {
1779                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1780         }
1781         btrfs_mark_buffer_dirty(leaf);
1782 }
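
/*
 * Editorial illustration: btrfs_extend_item() above grows the extent item
 * by 'size' bytes, and the memmove shifts every inline ref at or after
 * 'ptr' toward the new tail, opening a gap where the sorted order says the
 * new ref belongs:
 *
 *	before:	[ei header][ref A][ref B][ref C]
 *	                          ^ptr
 *	after:	[ei header][ref A][ new ][ref B][ref C]
 */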
1783
1784 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1785                                  struct btrfs_root *root,
1786                                  struct btrfs_path *path,
1787                                  struct btrfs_extent_inline_ref **ref_ret,
1788                                  u64 bytenr, u64 num_bytes, u64 parent,
1789                                  u64 root_objectid, u64 owner, u64 offset)
1790 {
1791         int ret;
1792
1793         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1794                                            bytenr, num_bytes, parent,
1795                                            root_objectid, owner, offset, 0);
1796         if (ret != -ENOENT)
1797                 return ret;
1798
1799         btrfs_release_path(path);
1800         *ref_ret = NULL;
1801
1802         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1803                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1804                                             root_objectid);
1805         } else {
1806                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1807                                              root_objectid, owner, offset);
1808         }
1809         return ret;
1810 }
1811
1812 /*
1813  * helper to update/remove inline back ref
1814  */
1815 static noinline_for_stack
1816 void update_inline_extent_backref(struct btrfs_root *root,
1817                                   struct btrfs_path *path,
1818                                   struct btrfs_extent_inline_ref *iref,
1819                                   int refs_to_mod,
1820                                   struct btrfs_delayed_extent_op *extent_op,
1821                                   int *last_ref)
1822 {
1823         struct extent_buffer *leaf;
1824         struct btrfs_extent_item *ei;
1825         struct btrfs_extent_data_ref *dref = NULL;
1826         struct btrfs_shared_data_ref *sref = NULL;
1827         unsigned long ptr;
1828         unsigned long end;
1829         u32 item_size;
1830         int size;
1831         int type;
1832         u64 refs;
1833
1834         leaf = path->nodes[0];
1835         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1836         refs = btrfs_extent_refs(leaf, ei);
1837         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1838         refs += refs_to_mod;
1839         btrfs_set_extent_refs(leaf, ei, refs);
1840         if (extent_op)
1841                 __run_delayed_extent_op(extent_op, leaf, ei);
1842
1843         type = btrfs_extent_inline_ref_type(leaf, iref);
1844
1845         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1846                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1847                 refs = btrfs_extent_data_ref_count(leaf, dref);
1848         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1849                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1850                 refs = btrfs_shared_data_ref_count(leaf, sref);
1851         } else {
1852                 refs = 1;
1853                 BUG_ON(refs_to_mod != -1);
1854         }
1855
1856         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1857         refs += refs_to_mod;
1858
1859         if (refs > 0) {
1860                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1861                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1862                 else
1863                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1864         } else {
1865                 *last_ref = 1;
1866                 size =  btrfs_extent_inline_ref_size(type);
1867                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1868                 ptr = (unsigned long)iref;
1869                 end = (unsigned long)ei + item_size;
1870                 if (ptr + size < end)
1871                         memmove_extent_buffer(leaf, ptr, ptr + size,
1872                                               end - ptr - size);
1873                 item_size -= size;
1874                 btrfs_truncate_item(root, path, item_size, 1);
1875         }
1876         btrfs_mark_buffer_dirty(leaf);
1877 }
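
/*
 * Editorial illustration: the removal path above is the inverse of the
 * insertion memmove in setup_inline_extent_backref().  The refs after the
 * dead one slide left over it and the item is truncated by 'size' bytes:
 *
 *	before:	[ei header][ref A][ref B][ref C]
 *	                          ^iref (count dropped to zero)
 *	after:	[ei header][ref A][ref C]
 */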
1878
1879 static noinline_for_stack
1880 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1881                                  struct btrfs_root *root,
1882                                  struct btrfs_path *path,
1883                                  u64 bytenr, u64 num_bytes, u64 parent,
1884                                  u64 root_objectid, u64 owner,
1885                                  u64 offset, int refs_to_add,
1886                                  struct btrfs_delayed_extent_op *extent_op)
1887 {
1888         struct btrfs_extent_inline_ref *iref;
1889         int ret;
1890
1891         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1892                                            bytenr, num_bytes, parent,
1893                                            root_objectid, owner, offset, 1);
1894         if (ret == 0) {
1895                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1896                 update_inline_extent_backref(root, path, iref,
1897                                              refs_to_add, extent_op, NULL);
1898         } else if (ret == -ENOENT) {
1899                 setup_inline_extent_backref(root, path, iref, parent,
1900                                             root_objectid, owner, offset,
1901                                             refs_to_add, extent_op);
1902                 ret = 0;
1903         }
1904         return ret;
1905 }
1906
1907 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1908                                  struct btrfs_root *root,
1909                                  struct btrfs_path *path,
1910                                  u64 bytenr, u64 parent, u64 root_objectid,
1911                                  u64 owner, u64 offset, int refs_to_add)
1912 {
1913         int ret;
1914         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1915                 BUG_ON(refs_to_add != 1);
1916                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1917                                             parent, root_objectid);
1918         } else {
1919                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1920                                              parent, root_objectid,
1921                                              owner, offset, refs_to_add);
1922         }
1923         return ret;
1924 }
1925
1926 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1927                                  struct btrfs_root *root,
1928                                  struct btrfs_path *path,
1929                                  struct btrfs_extent_inline_ref *iref,
1930                                  int refs_to_drop, int is_data, int *last_ref)
1931 {
1932         int ret = 0;
1933
1934         BUG_ON(!is_data && refs_to_drop != 1);
1935         if (iref) {
1936                 update_inline_extent_backref(root, path, iref,
1937                                              -refs_to_drop, NULL, last_ref);
1938         } else if (is_data) {
1939                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
1940                                              last_ref);
1941         } else {
1942                 *last_ref = 1;
1943                 ret = btrfs_del_item(trans, root, path);
1944         }
1945         return ret;
1946 }
1947
1948 #define in_range(b, first, len)        ((b) >= (first) && (b) < (first) + (len))
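/*
 * Note (editorial): in_range() checks the half-open interval
 * [first, first + len), e.g. in_range(63, 0, 64) is true while
 * in_range(64, 0, 64) is false.
 */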
1949 static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
1950                                u64 *discarded_bytes)
1951 {
1952         int j, ret = 0;
1953         u64 bytes_left, end;
1954         u64 aligned_start = ALIGN(start, 1 << 9);
1955
1956         if (WARN_ON(start != aligned_start)) {
1957                 len -= aligned_start - start;
1958                 len = round_down(len, 1 << 9);
1959                 start = aligned_start;
1960         }
1961
1962         *discarded_bytes = 0;
1963
1964         if (!len)
1965                 return 0;
1966
1967         end = start + len;
1968         bytes_left = len;
1969
1970         /* Skip any superblocks on this device. */
1971         for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
1972                 u64 sb_start = btrfs_sb_offset(j);
1973                 u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
1974                 u64 size = sb_start - start;
1975
1976                 if (!in_range(sb_start, start, bytes_left) &&
1977                     !in_range(sb_end, start, bytes_left) &&
1978                     !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
1979                         continue;
1980
1981                 /*
1982                  * Superblock spans beginning of range.  Adjust start and
1983                  * try again.
1984                  */
1985                 if (sb_start <= start) {
1986                         start += sb_end - start;
1987                         if (start > end) {
1988                                 bytes_left = 0;
1989                                 break;
1990                         }
1991                         bytes_left = end - start;
1992                         continue;
1993                 }
1994
1995                 if (size) {
1996                         ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
1997                                                    GFP_NOFS, 0);
1998                         if (!ret)
1999                                 *discarded_bytes += size;
2000                         else if (ret != -EOPNOTSUPP)
2001                                 return ret;
2002                 }
2003
2004                 start = sb_end;
2005                 if (start > end) {
2006                         bytes_left = 0;
2007                         break;
2008                 }
2009                 bytes_left = end - start;
2010         }
2011
2012         if (bytes_left) {
2013                 ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
2014                                            GFP_NOFS, 0);
2015                 if (!ret)
2016                         *discarded_bytes += bytes_left;
2017         }
2018         return ret;
2019 }
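
/*
 * Editorial worked example: with the primary superblock at
 * btrfs_sb_offset(0) == 64K and BTRFS_SUPER_INFO_SIZE == 4K, a discard of
 * [0, 1M) is split around it (the later mirrors at 64M and 256G fall
 * outside the range):
 *
 *	blkdev_issue_discard on [0, 64K)	// size = sb_start - start
 *	skip [64K, 68K)				// the superblock itself
 *	blkdev_issue_discard on [68K, 1M)	// bytes_left after the loop
 */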
2020
2021 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
2022                          u64 num_bytes, u64 *actual_bytes)
2023 {
2024         int ret;
2025         u64 discarded_bytes = 0;
2026         struct btrfs_bio *bbio = NULL;
2027
2029         /* Tell the block device(s) that the sectors can be discarded */
2030         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
2031                               bytenr, &num_bytes, &bbio, 0);
2032         /* Error condition is -ENOMEM */
2033         if (!ret) {
2034                 struct btrfs_bio_stripe *stripe = bbio->stripes;
2035                 int i;
2036
2037
2038                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
2039                         u64 bytes;
2040                         if (!stripe->dev->can_discard)
2041                                 continue;
2042
2043                         ret = btrfs_issue_discard(stripe->dev->bdev,
2044                                                   stripe->physical,
2045                                                   stripe->length,
2046                                                   &bytes);
2047                         if (!ret)
2048                                 discarded_bytes += bytes;
2049                         else if (ret != -EOPNOTSUPP)
2050                                 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
2051
2052                         /*
2053                          * Just in case we get back EOPNOTSUPP for some
2054                          * reason, ignore the return value so we don't
2055                          * break callers of discard_extent.
2056                          */
2057                         ret = 0;
2058                 }
2059                 btrfs_put_bbio(bbio);
2060         }
2061
2062         if (actual_bytes)
2063                 *actual_bytes = discarded_bytes;
2064
2066         if (ret == -EOPNOTSUPP)
2067                 ret = 0;
2068         return ret;
2069 }
2070
2071 /* Can return -ENOMEM */
2072 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2073                          struct btrfs_root *root,
2074                          u64 bytenr, u64 num_bytes, u64 parent,
2075                          u64 root_objectid, u64 owner, u64 offset)
2076 {
2077         int ret;
2078         struct btrfs_fs_info *fs_info = root->fs_info;
2079
2080         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
2081                root_objectid == BTRFS_TREE_LOG_OBJECTID);
2082
2083         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
2084                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
2085                                         num_bytes,
2086                                         parent, root_objectid, (int)owner,
2087                                         BTRFS_ADD_DELAYED_REF, NULL);
2088         } else {
2089                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
2090                                         num_bytes, parent, root_objectid,
2091                                         owner, offset, 0,
2092                                         BTRFS_ADD_DELAYED_REF, NULL);
2093         }
2094         return ret;
2095 }
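
/*
 * Editorial usage sketch (call site and parameters are illustrative, not
 * from this file): when a file extent becomes shared, e.g. by a clone, the
 * caller queues a delayed ref bump for the extent's disk location:
 *
 *	ret = btrfs_inc_extent_ref(trans, root, disk_bytenr, disk_num_bytes,
 *				   0,	// parent 0: use a keyed backref
 *				   root->root_key.objectid,
 *				   btrfs_ino(inode), file_extent_offset);
 *	if (ret)	// only -ENOMEM is expected here
 *		btrfs_abort_transaction(trans, root, ret);
 */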
2096
2097 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2098                                   struct btrfs_root *root,
2099                                   struct btrfs_delayed_ref_node *node,
2100                                   u64 parent, u64 root_objectid,
2101                                   u64 owner, u64 offset, int refs_to_add,
2102                                   struct btrfs_delayed_extent_op *extent_op)
2103 {
2104         struct btrfs_fs_info *fs_info = root->fs_info;
2105         struct btrfs_path *path;
2106         struct extent_buffer *leaf;
2107         struct btrfs_extent_item *item;
2108         struct btrfs_key key;
2109         u64 bytenr = node->bytenr;
2110         u64 num_bytes = node->num_bytes;
2111         u64 refs;
2112         int ret;
2113
2114         path = btrfs_alloc_path();
2115         if (!path)
2116                 return -ENOMEM;
2117
2118         path->reada = 1;
2119         path->leave_spinning = 1;
2120         /* this will set up the path even if it fails to insert the back ref */
2121         ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
2122                                            bytenr, num_bytes, parent,
2123                                            root_objectid, owner, offset,
2124                                            refs_to_add, extent_op);
2125         if ((ret < 0 && ret != -EAGAIN) || !ret)
2126                 goto out;
2127
2128         /*
2129          * Ok we had -EAGAIN which means we didn't have space to insert an
2130          * inline extent ref, so just update the reference count and add a
2131          * normal backref.
2132          */
2133         leaf = path->nodes[0];
2134         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2135         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2136         refs = btrfs_extent_refs(leaf, item);
2137         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2138         if (extent_op)
2139                 __run_delayed_extent_op(extent_op, leaf, item);
2140
2141         btrfs_mark_buffer_dirty(leaf);
2142         btrfs_release_path(path);
2143
2144         path->reada = 1;
2145         path->leave_spinning = 1;
2146         /* now insert the actual backref */
2147         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2148                                     path, bytenr, parent, root_objectid,
2149                                     owner, offset, refs_to_add);
2150         if (ret)
2151                 btrfs_abort_transaction(trans, root, ret);
2152 out:
2153         btrfs_free_path(path);
2154         return ret;
2155 }
2156
2157 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2158                                 struct btrfs_root *root,
2159                                 struct btrfs_delayed_ref_node *node,
2160                                 struct btrfs_delayed_extent_op *extent_op,
2161                                 int insert_reserved)
2162 {
2163         int ret = 0;
2164         struct btrfs_delayed_data_ref *ref;
2165         struct btrfs_key ins;
2166         u64 parent = 0;
2167         u64 ref_root = 0;
2168         u64 flags = 0;
2169
2170         ins.objectid = node->bytenr;
2171         ins.offset = node->num_bytes;
2172         ins.type = BTRFS_EXTENT_ITEM_KEY;
2173
2174         ref = btrfs_delayed_node_to_data_ref(node);
2175         trace_run_delayed_data_ref(node, ref, node->action);
2176
2177         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2178                 parent = ref->parent;
2179         ref_root = ref->root;
2180
2181         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2182                 if (extent_op)
2183                         flags |= extent_op->flags_to_set;
2184                 ret = alloc_reserved_file_extent(trans, root,
2185                                                  parent, ref_root, flags,
2186                                                  ref->objectid, ref->offset,
2187                                                  &ins, node->ref_mod);
2188         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2189                 ret = __btrfs_inc_extent_ref(trans, root, node, parent,
2190                                              ref_root, ref->objectid,
2191                                              ref->offset, node->ref_mod,
2192                                              extent_op);
2193         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2194                 ret = __btrfs_free_extent(trans, root, node, parent,
2195                                           ref_root, ref->objectid,
2196                                           ref->offset, node->ref_mod,
2197                                           extent_op);
2198         } else {
2199                 BUG();
2200         }
2201         return ret;
2202 }
2203
2204 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2205                                     struct extent_buffer *leaf,
2206                                     struct btrfs_extent_item *ei)
2207 {
2208         u64 flags = btrfs_extent_flags(leaf, ei);
2209         if (extent_op->update_flags) {
2210                 flags |= extent_op->flags_to_set;
2211                 btrfs_set_extent_flags(leaf, ei, flags);
2212         }
2213
2214         if (extent_op->update_key) {
2215                 struct btrfs_tree_block_info *bi;
2216                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2217                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2218                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2219         }
2220 }
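
/*
 * Editorial sketch (field values are illustrative): a producer fills a
 * btrfs_delayed_extent_op with the pieces __run_delayed_extent_op() applies
 * above, then attaches it to a delayed ref:
 *
 *	struct btrfs_delayed_extent_op *op = btrfs_alloc_delayed_extent_op();
 *
 *	if (op) {
 *		op->flags_to_set = BTRFS_BLOCK_FLAG_FULL_BACKREF;
 *		op->update_flags = 1;	// apply flags_to_set to the item
 *		op->update_key = 0;	// leave the tree block key untouched
 *	}
 */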
2221
2222 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2223                                  struct btrfs_root *root,
2224                                  struct btrfs_delayed_ref_node *node,
2225                                  struct btrfs_delayed_extent_op *extent_op)
2226 {
2227         struct btrfs_key key;
2228         struct btrfs_path *path;
2229         struct btrfs_extent_item *ei;
2230         struct extent_buffer *leaf;
2231         u32 item_size;
2232         int ret;
2233         int err = 0;
2234         int metadata = !extent_op->is_data;
2235
2236         if (trans->aborted)
2237                 return 0;
2238
2239         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2240                 metadata = 0;
2241
2242         path = btrfs_alloc_path();
2243         if (!path)
2244                 return -ENOMEM;
2245
2246         key.objectid = node->bytenr;
2247
2248         if (metadata) {
2249                 key.type = BTRFS_METADATA_ITEM_KEY;
2250                 key.offset = extent_op->level;
2251         } else {
2252                 key.type = BTRFS_EXTENT_ITEM_KEY;
2253                 key.offset = node->num_bytes;
2254         }
2255
2256 again:
2257         path->reada = 1;
2258         path->leave_spinning = 1;
2259         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2260                                 path, 0, 1);
2261         if (ret < 0) {
2262                 err = ret;
2263                 goto out;
2264         }
2265         if (ret > 0) {
2266                 if (metadata) {
2267                         if (path->slots[0] > 0) {
2268                                 path->slots[0]--;
2269                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2270                                                       path->slots[0]);
2271                                 if (key.objectid == node->bytenr &&
2272                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2273                                     key.offset == node->num_bytes)
2274                                         ret = 0;
2275                         }
2276                         if (ret > 0) {
2277                                 btrfs_release_path(path);
2278                                 metadata = 0;
2279
2280                                 key.objectid = node->bytenr;
2281                                 key.offset = node->num_bytes;
2282                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2283                                 goto again;
2284                         }
2285                 } else {
2286                         err = -EIO;
2287                         goto out;
2288                 }
2289         }
2290
2291         leaf = path->nodes[0];
2292         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2293 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2294         if (item_size < sizeof(*ei)) {
2295                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2296                                              path, (u64)-1, 0);
2297                 if (ret < 0) {
2298                         err = ret;
2299                         goto out;
2300                 }
2301                 leaf = path->nodes[0];
2302                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2303         }
2304 #endif
2305         BUG_ON(item_size < sizeof(*ei));
2306         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2307         __run_delayed_extent_op(extent_op, leaf, ei);
2308
2309         btrfs_mark_buffer_dirty(leaf);
2310 out:
2311         btrfs_free_path(path);
2312         return err;
2313 }
2314
2315 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2316                                 struct btrfs_root *root,
2317                                 struct btrfs_delayed_ref_node *node,
2318                                 struct btrfs_delayed_extent_op *extent_op,
2319                                 int insert_reserved)
2320 {
2321         int ret = 0;
2322         struct btrfs_delayed_tree_ref *ref;
2323         struct btrfs_key ins;
2324         u64 parent = 0;
2325         u64 ref_root = 0;
2326         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2327                                                  SKINNY_METADATA);
2328
2329         ref = btrfs_delayed_node_to_tree_ref(node);
2330         trace_run_delayed_tree_ref(node, ref, node->action);
2331
2332         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2333                 parent = ref->parent;
2334         ref_root = ref->root;
2335
2336         ins.objectid = node->bytenr;
2337         if (skinny_metadata) {
2338                 ins.offset = ref->level;
2339                 ins.type = BTRFS_METADATA_ITEM_KEY;
2340         } else {
2341                 ins.offset = node->num_bytes;
2342                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2343         }
2344
2345         BUG_ON(node->ref_mod != 1);
2346         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2347                 BUG_ON(!extent_op || !extent_op->update_flags);
2348                 ret = alloc_reserved_tree_block(trans, root,
2349                                                 parent, ref_root,
2350                                                 extent_op->flags_to_set,
2351                                                 &extent_op->key,
2352                                                 ref->level, &ins);
2353         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2354                 ret = __btrfs_inc_extent_ref(trans, root, node,
2355                                              parent, ref_root,
2356                                              ref->level, 0, 1,
2357                                              extent_op);
2358         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2359                 ret = __btrfs_free_extent(trans, root, node,
2360                                           parent, ref_root,
2361                                           ref->level, 0, 1, extent_op);
2362         } else {
2363                 BUG();
2364         }
2365         return ret;
2366 }
2367
2368 /* helper function to actually process a single delayed ref entry */
2369 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2370                                struct btrfs_root *root,
2371                                struct btrfs_delayed_ref_node *node,
2372                                struct btrfs_delayed_extent_op *extent_op,
2373                                int insert_reserved)
2374 {
2375         int ret = 0;
2376
2377         if (trans->aborted) {
2378                 if (insert_reserved)
2379                         btrfs_pin_extent(root, node->bytenr,
2380                                          node->num_bytes, 1);
2381                 return 0;
2382         }
2383
2384         if (btrfs_delayed_ref_is_head(node)) {
2385                 struct btrfs_delayed_ref_head *head;
2386                 /*
2387                  * We've hit the end of the chain and we were supposed
2388                  * to insert this extent into the tree.  But it got
2389                  * deleted before we ever needed to insert it, so all
2390                  * we have to do is clean up the accounting.
2391                  */
2392                 BUG_ON(extent_op);
2393                 head = btrfs_delayed_node_to_head(node);
2394                 trace_run_delayed_ref_head(node, head, node->action);
2395
2396                 if (insert_reserved) {
2397                         btrfs_pin_extent(root, node->bytenr,
2398                                          node->num_bytes, 1);
2399                         if (head->is_data) {
2400                                 ret = btrfs_del_csums(trans, root,
2401                                                       node->bytenr,
2402                                                       node->num_bytes);
2403                         }
2404                 }
2405
2406                 /* Also free its reserved qgroup space */
2407                 btrfs_qgroup_free_delayed_ref(root->fs_info,
2408                                               head->qgroup_ref_root,
2409                                               head->qgroup_reserved);
2410                 return ret;
2411         }
2412
2413         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2414             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2415                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2416                                            insert_reserved);
2417         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2418                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2419                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2420                                            insert_reserved);
2421         else
2422                 BUG();
2423         return ret;
2424 }
2425
2426 static inline struct btrfs_delayed_ref_node *
2427 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2428 {
2429         struct btrfs_delayed_ref_node *ref;
2430
2431         if (list_empty(&head->ref_list))
2432                 return NULL;
2433
2434         /*
2435          * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2436          * This is to prevent a ref count from going down to zero, which deletes
2437          * the extent item from the extent tree, when there still are references
2438          * to add, which would fail because they would not find the extent item.
2439          */
2440         list_for_each_entry(ref, &head->ref_list, list) {
2441                 if (ref->action == BTRFS_ADD_DELAYED_REF)
2442                         return ref;
2443         }
2444
2445         return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
2446                           list);
2447 }
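
/*
 * Editorial worked example: if head->ref_list holds [DROP(ref_mod 1),
 * ADD(ref_mod 1)] for an extent whose item currently has one reference,
 * select_delayed_ref() returns the ADD first, so the on-disk count goes
 * 1 -> 2 -> 1 instead of 1 -> 0, which would delete the extent item and
 * make the later ADD fail to find it.
 */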
2448
2449 /*
2450  * Returns 0 on success or if called with an already aborted transaction.
2451  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2452  */
2453 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2454                                              struct btrfs_root *root,
2455                                              unsigned long nr)
2456 {
2457         struct btrfs_delayed_ref_root *delayed_refs;
2458         struct btrfs_delayed_ref_node *ref;
2459         struct btrfs_delayed_ref_head *locked_ref = NULL;
2460         struct btrfs_delayed_extent_op *extent_op;
2461         struct btrfs_fs_info *fs_info = root->fs_info;
2462         ktime_t start = ktime_get();
2463         int ret;
2464         unsigned long count = 0;
2465         unsigned long actual_count = 0;
2466         int must_insert_reserved = 0;
2467
2468         delayed_refs = &trans->transaction->delayed_refs;
2469         while (1) {
2470                 if (!locked_ref) {
2471                         if (count >= nr)
2472                                 break;
2473
2474                         spin_lock(&delayed_refs->lock);
2475                         locked_ref = btrfs_select_ref_head(trans);
2476                         if (!locked_ref) {
2477                                 spin_unlock(&delayed_refs->lock);
2478                                 break;
2479                         }
2480
2481                         /* grab the lock that says we are going to process
2482                          * all the refs for this head */
2483                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2484                         spin_unlock(&delayed_refs->lock);
2485                         /*
2486                          * we may have dropped the spin lock to get the head
2487                          * mutex lock, and that might have given someone else
2488                          * time to free the head.  If that's true, it has been
2489                          * removed from our list and we can move on.
2490                          */
2491                         if (ret == -EAGAIN) {
2492                                 locked_ref = NULL;
2493                                 count++;
2494                                 continue;
2495                         }
2496                 }
2497
2498                 /*
2499                  * We need to try and merge add/drops of the same ref since we
2500                  * can run into issues with relocate dropping the implicit ref
2501                  * and then it being added back again before the drop can
2502                  * finish.  If we merged anything we need to re-loop so we can
2503                  * get a good ref.
2504                  * Or we can get node references of the same type that weren't
2505                  * merged when created due to bumps in the tree mod seq, and
2506                  * we need to merge them to prevent adding an inline extent
2507                  * backref before dropping it (triggering a BUG_ON at
2508                  * insert_inline_extent_backref()).
2509                  */
2510                 spin_lock(&locked_ref->lock);
2511                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2512                                          locked_ref);
2513
2514                 /*
2515                  * locked_ref is the head node, so we have to go one
2516                  * node back for any delayed ref updates
2517                  */
2518                 ref = select_delayed_ref(locked_ref);
2519
2520                 if (ref && ref->seq &&
2521                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2522                         spin_unlock(&locked_ref->lock);
2523                         btrfs_delayed_ref_unlock(locked_ref);
2524                         spin_lock(&delayed_refs->lock);
2525                         locked_ref->processing = 0;
2526                         delayed_refs->num_heads_ready++;
2527                         spin_unlock(&delayed_refs->lock);
2528                         locked_ref = NULL;
2529                         cond_resched();
2530                         count++;
2531                         continue;
2532                 }
2533
2534                 /*
2535                  * record the must insert reserved flag before we
2536                  * drop the spin lock.
2537                  */
2538                 must_insert_reserved = locked_ref->must_insert_reserved;
2539                 locked_ref->must_insert_reserved = 0;
2540
2541                 extent_op = locked_ref->extent_op;
2542                 locked_ref->extent_op = NULL;
2543
2544                 if (!ref) {
2547                         /* All delayed refs have been processed; go ahead
2548                          * and send the head node to run_one_delayed_ref,
2549                          * so that any accounting fixes can happen.
2550                          */
2551                         ref = &locked_ref->node;
2552
2553                         if (extent_op && must_insert_reserved) {
2554                                 btrfs_free_delayed_extent_op(extent_op);
2555                                 extent_op = NULL;
2556                         }
2557
2558                         if (extent_op) {
2559                                 spin_unlock(&locked_ref->lock);
2560                                 ret = run_delayed_extent_op(trans, root,
2561                                                             ref, extent_op);
2562                                 btrfs_free_delayed_extent_op(extent_op);
2563
2564                                 if (ret) {
2565                                         /*
2566                                          * Need to reset must_insert_reserved if
2567                                          * there was an error so the abort stuff
2568                                          * can cleanup the reserved space
2569                                          * properly.
2570                                          */
2571                                         if (must_insert_reserved)
2572                                                 locked_ref->must_insert_reserved = 1;
2573                                         locked_ref->processing = 0;
2574                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2575                                         btrfs_delayed_ref_unlock(locked_ref);
2576                                         return ret;
2577                                 }
2578                                 continue;
2579                         }
2580
2581                         /*
2582                          * Need to drop our head ref lock and re-acquire the
2583                          * delayed ref lock and then re-check to make sure
2584                          * nobody got added.
2585                          */
2586                         spin_unlock(&locked_ref->lock);
2587                         spin_lock(&delayed_refs->lock);
2588                         spin_lock(&locked_ref->lock);
2589                         if (!list_empty(&locked_ref->ref_list) ||
2590                             locked_ref->extent_op) {
2591                                 spin_unlock(&locked_ref->lock);
2592                                 spin_unlock(&delayed_refs->lock);
2593                                 continue;
2594                         }
2595                         ref->in_tree = 0;
2596                         delayed_refs->num_heads--;
2597                         rb_erase(&locked_ref->href_node,
2598                                  &delayed_refs->href_root);
2599                         spin_unlock(&delayed_refs->lock);
2600                 } else {
2601                         actual_count++;
2602                         ref->in_tree = 0;
2603                         list_del(&ref->list);
2604                 }
2605                 atomic_dec(&delayed_refs->num_entries);
2606
2607                 if (!btrfs_delayed_ref_is_head(ref)) {
2608                         /*
2609                          * When we play the delayed ref, also correct the
2610                          * ref_mod on the head.
2611                          */
2612                         switch (ref->action) {
2613                         case BTRFS_ADD_DELAYED_REF:
2614                         case BTRFS_ADD_DELAYED_EXTENT:
2615                                 locked_ref->node.ref_mod -= ref->ref_mod;
2616                                 break;
2617                         case BTRFS_DROP_DELAYED_REF:
2618                                 locked_ref->node.ref_mod += ref->ref_mod;
2619                                 break;
2620                         default:
2621                                 WARN_ON(1);
2622                         }
2623                 }
2624                 spin_unlock(&locked_ref->lock);
2625
2626                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2627                                           must_insert_reserved);
2628
2629                 btrfs_free_delayed_extent_op(extent_op);
2630                 if (ret) {
2631                         locked_ref->processing = 0;
2632                         btrfs_delayed_ref_unlock(locked_ref);
2633                         btrfs_put_delayed_ref(ref);
2634                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2635                         return ret;
2636                 }
2637
2638                 /*
2639                  * If this node is a head, that means all the refs in this head
2640                  * have been dealt with, and we will pick the next head to deal
2641                  * with, so we must unlock the head and drop it from the cluster
2642                  * list before we release it.
2643                  */
2644                 if (btrfs_delayed_ref_is_head(ref)) {
2645                         if (locked_ref->is_data &&
2646                             locked_ref->total_ref_mod < 0) {
2647                                 spin_lock(&delayed_refs->lock);
2648                                 delayed_refs->pending_csums -= ref->num_bytes;
2649                                 spin_unlock(&delayed_refs->lock);
2650                         }
2651                         btrfs_delayed_ref_unlock(locked_ref);
2652                         locked_ref = NULL;
2653                 }
2654                 btrfs_put_delayed_ref(ref);
2655                 count++;
2656                 cond_resched();
2657         }
2658
2659         /*
2660          * We don't want to include ref heads since we can have empty ref heads,
2661          * and those would drastically skew our runtime down because we just do
2662          * accounting, not actual extent tree updates.
2663          */
2664         if (actual_count > 0) {
2665                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2666                 u64 avg;
2667
2668                 /*
2669                  * We weigh the current average higher than our current runtime
2670                  * to avoid large swings in the average.
2671                  */
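                /*
                 * i.e. avg = (3 * old_avg + runtime) / 4; e.g. an old average
                 * of 100us and a 20us run give (3 * 100 + 20) / 4 = 80us.
                 */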
2672                 spin_lock(&delayed_refs->lock);
2673                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2674                 fs_info->avg_delayed_ref_runtime = avg >> 2;    /* div by 4 */
2675                 spin_unlock(&delayed_refs->lock);
2676         }
2677         return 0;
2678 }
2679
2680 #ifdef SCRAMBLE_DELAYED_REFS
2681 /*
2682  * Normally delayed refs get processed in ascending bytenr order. This
2683  * correlates in most cases to the order added. To expose dependencies on this
2684  * order, we start to process the tree in the middle instead of the beginning.
2685  */
2686 static u64 find_middle(struct rb_root *root)
2687 {
2688         struct rb_node *n = root->rb_node;
2689         struct btrfs_delayed_ref_node *entry;
2690         int alt = 1;
2691         u64 middle;
2692         u64 first = 0, last = 0;
2693
2694         n = rb_first(root);
2695         if (n) {
2696                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2697                 first = entry->bytenr;
2698         }
2699         n = rb_last(root);
2700         if (n) {
2701                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2702                 last = entry->bytenr;
2703         }
2704         n = root->rb_node;
2705
2706         while (n) {
2707                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2708                 WARN_ON(!entry->in_tree);
2709
2710                 middle = entry->bytenr;
2711
2712                 if (alt)
2713                         n = n->rb_left;
2714                 else
2715                         n = n->rb_right;
2716
2717                 alt = 1 - alt;
2718         }
2719         return middle;
2720 }
2721 #endif
2722
2723 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2724 {
2725         u64 num_bytes;
2726
2727         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2728                              sizeof(struct btrfs_extent_inline_ref));
2729         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2730                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2731
2732         /*
2733          * We don't ever fill up leaves all the way, so the caller doubles this
2734          * estimate to be closer to what we're really going to want to use.
2735          */
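        /*
         * Rough illustration, assuming ~16KiB leaves and skinny metadata:
         * each head costs sizeof(extent item) + sizeof(inline ref), about
         * 33 bytes, so a single leaf covers on the order of 500 heads.
         */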
2736         return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2737 }
2738
2739 /*
2740  * Takes the number of bytes to be checksummed and figures out how many leaves it
2741  * would require to store the csums for that many bytes.
2742  */
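/*
 * Rough illustration, assuming a 16KiB nodesize, 4 byte crc32c csums and a
 * 4KiB sectorsize: a leaf holds roughly 16KiB / 4 ~= 4000 csums, so 1GiB of
 * data (262144 sectors) needs about 262144 / 4000 ~= 66 leaves.
 */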
2743 u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
2744 {
2745         u64 csum_size;
2746         u64 num_csums_per_leaf;
2747         u64 num_csums;
2748
2749         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
2750         num_csums_per_leaf = div64_u64(csum_size,
2751                         (u64)btrfs_super_csum_size(root->fs_info->super_copy));
2752         num_csums = div64_u64(csum_bytes, root->sectorsize);
2753         num_csums += num_csums_per_leaf - 1;
2754         num_csums = div64_u64(num_csums, num_csums_per_leaf);
2755         return num_csums;
2756 }
2757
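/*
 * Estimate whether the global block reserve has room for the metadata
 * updates the queued delayed ref heads (plus the csum deletions and dirty
 * block group items they imply) may generate.  Returns 1 if the reserve
 * looks too small, 0 otherwise.
 */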
2758 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2759                                        struct btrfs_root *root)
2760 {
2761         struct btrfs_block_rsv *global_rsv;
2762         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2763         u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
2764         u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
2765         u64 num_bytes, num_dirty_bgs_bytes;
2766         int ret = 0;
2767
2768         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2769         num_heads = heads_to_leaves(root, num_heads);
2770         if (num_heads > 1)
2771                 num_bytes += (num_heads - 1) * root->nodesize;
2772         num_bytes <<= 1;
2773         num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
2774         num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
2775                                                              num_dirty_bgs);
2776         global_rsv = &root->fs_info->global_block_rsv;
2777
2778         /*
2779          * If we can't allocate any more chunks, let's make sure we have _lots_ of
2780          * wiggle room since running delayed refs can create more delayed refs.
2781          */
2782         if (global_rsv->space_info->full) {
2783                 num_dirty_bgs_bytes <<= 1;
2784                 num_bytes <<= 1;
2785         }
2786
2787         spin_lock(&global_rsv->lock);
2788         if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
2789                 ret = 1;
2790         spin_unlock(&global_rsv->lock);
2791         return ret;
2792 }
2793
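/*
 * Decide whether the caller should help flush delayed refs: returns 1 when
 * the estimated time to run the queue (num_entries * average runtime per
 * ref) reaches a second, 2 when it reaches half a second, and otherwise
 * falls back to the global reserve check above.
 */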
2794 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2795                                        struct btrfs_root *root)
2796 {
2797         struct btrfs_fs_info *fs_info = root->fs_info;
2798         u64 num_entries =
2799                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2800         u64 avg_runtime;
2801         u64 val;
2802
2803         smp_mb();
2804         avg_runtime = fs_info->avg_delayed_ref_runtime;
2805         val = num_entries * avg_runtime;
2806         if (val >= NSEC_PER_SEC)
2807                 return 1;
2808         if (val >= NSEC_PER_SEC / 2)
2809                 return 2;
2810
2811         return btrfs_check_space_for_delayed_refs(trans, root);
2812 }
2813
2814 struct async_delayed_refs {
2815         struct btrfs_root *root;
2816         int count;
2817         int error;
2818         int sync;
2819         struct completion wait;
2820         struct btrfs_work work;
2821 };
2822
2823 static void delayed_ref_async_start(struct btrfs_work *work)
2824 {
2825         struct async_delayed_refs *async;
2826         struct btrfs_trans_handle *trans;
2827         int ret;
2828
2829         async = container_of(work, struct async_delayed_refs, work);
2830
2831         trans = btrfs_join_transaction(async->root);
2832         if (IS_ERR(trans)) {
2833                 async->error = PTR_ERR(trans);
2834                 goto done;
2835         }
2836
2837         /*
2838          * trans->sync means that when we call end_transaction, we won't
2839          * wait on delayed refs
2840          */
2841         trans->sync = true;
2842         ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2843         if (ret)
2844                 async->error = ret;
2845
2846         ret = btrfs_end_transaction(trans, async->root);
2847         if (ret && !async->error)
2848                 async->error = ret;
2849 done:
2850         if (async->sync)
2851                 complete(&async->wait);
2852         else
2853                 kfree(async);
2854 }
2855
2856 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2857                                  unsigned long count, int wait)
2858 {
2859         struct async_delayed_refs *async;
2860         int ret;
2861
2862         async = kmalloc(sizeof(*async), GFP_NOFS);
2863         if (!async)
2864                 return -ENOMEM;
2865
2866         async->root = root->fs_info->tree_root;
2867         async->count = count;
2868         async->error = 0;
2869         if (wait)
2870                 async->sync = 1;
2871         else
2872                 async->sync = 0;
2873         init_completion(&async->wait);
2874
2875         btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2876                         delayed_ref_async_start, NULL, NULL);
2877
2878         btrfs_queue_work(root->fs_info->extent_workers, &async->work);
2879
2880         if (wait) {
2881                 wait_for_completion(&async->wait);
2882                 ret = async->error;
2883                 kfree(async);
2884                 return ret;
2885         }
2886         return 0;
2887 }
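/*
 * Illustrative calls (the counts are hypothetical): fire-and-forget
 * processing of ~64 refs versus blocking until the async transaction ends:
 *
 *	btrfs_async_run_delayed_refs(root, 64, 0);
 *	err = btrfs_async_run_delayed_refs(root, 64, 1);
 */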
2888
2889 /*
2890  * this starts processing the delayed reference count updates and
2891  * extent insertions we have queued up so far.  count can be
2892  * 0, which means to process everything in the tree at the start
2893  * of the run (but not newly added entries), or it can be some target
2894  * number you'd like to process.
2895  *
2896  * Returns 0 on success or if called with an aborted transaction
2897  * Returns <0 on error and aborts the transaction
2898  */
2899 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2900                            struct btrfs_root *root, unsigned long count)
2901 {
2902         struct rb_node *node;
2903         struct btrfs_delayed_ref_root *delayed_refs;
2904         struct btrfs_delayed_ref_head *head;
2905         int ret;
2906         int run_all = count == (unsigned long)-1;
2907         bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
2908
2909         /* We'll clean this up in btrfs_cleanup_transaction */
2910         if (trans->aborted)
2911                 return 0;
2912
2913         if (root == root->fs_info->extent_root)
2914                 root = root->fs_info->tree_root;
2915
2916         delayed_refs = &trans->transaction->delayed_refs;
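        /*
         * A count of 0 means "run everything queued at the start"; running
         * refs can queue more refs, so doubling num_entries below presumably
         * leaves headroom for entries added while we run.
         */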
2917         if (count == 0)
2918                 count = atomic_read(&delayed_refs->num_entries) * 2;
2919
2920 again:
2921 #ifdef SCRAMBLE_DELAYED_REFS
2922         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2923 #endif
2924         trans->can_flush_pending_bgs = false;
2925         ret = __btrfs_run_delayed_refs(trans, root, count);
2926         if (ret < 0) {
2927                 btrfs_abort_transaction(trans, root, ret);
2928                 return ret;
2929         }
2930
2931         if (run_all) {
2932                 if (!list_empty(&trans->new_bgs))
2933                         btrfs_create_pending_block_groups(trans, root);
2934
2935                 spin_lock(&delayed_refs->lock);
2936                 node = rb_first(&delayed_refs->href_root);
2937                 if (!node) {
2938                         spin_unlock(&delayed_refs->lock);
2939                         goto out;
2940                 }
2941                 count = (unsigned long)-1;
2942
2943                 while (node) {
2944                         head = rb_entry(node, struct btrfs_delayed_ref_head,
2945                                         href_node);
2946                         if (btrfs_delayed_ref_is_head(&head->node)) {
2947                                 struct btrfs_delayed_ref_node *ref;
2948
2949                                 ref = &head->node;
2950                                 atomic_inc(&ref->refs);
2951
2952                                 spin_unlock(&delayed_refs->lock);
2953                                 /*
2954                                  * Mutex was contended, block until it's
2955                                  * released and try again
2956                                  */
2957                                 mutex_lock(&head->mutex);
2958                                 mutex_unlock(&head->mutex);
2959
2960                                 btrfs_put_delayed_ref(ref);
2961                                 cond_resched();
2962                                 goto again;
2963                         } else {
2964                                 WARN_ON(1);
2965                         }
2966                         node = rb_next(node);
2967                 }
2968                 spin_unlock(&delayed_refs->lock);
2969                 cond_resched();
2970                 goto again;
2971         }
2972 out:
2973         assert_qgroups_uptodate(trans);
2974         trans->can_flush_pending_bgs = can_flush_pending_bgs;
2975         return 0;
2976 }
2977
2978 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2979                                 struct btrfs_root *root,
2980                                 u64 bytenr, u64 num_bytes, u64 flags,
2981                                 int level, int is_data)
2982 {
2983         struct btrfs_delayed_extent_op *extent_op;
2984         int ret;
2985
2986         extent_op = btrfs_alloc_delayed_extent_op();
2987         if (!extent_op)
2988                 return -ENOMEM;
2989
2990         extent_op->flags_to_set = flags;
2991         extent_op->update_flags = 1;
2992         extent_op->update_key = 0;
2993         extent_op->is_data = is_data ? 1 : 0;
2994         extent_op->level = level;
2995
2996         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2997                                           num_bytes, extent_op);
2998         if (ret)
2999                 btrfs_free_delayed_extent_op(extent_op);
3000         return ret;
3001 }
3002
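/*
 * Look for a data ref against (root, objectid, offset) among the delayed
 * refs queued for @bytenr.  Returns 1 if any ref from a different owner
 * exists (a cross reference), -EAGAIN if the head was contended and the
 * caller should retry, 0 if nothing conflicting was found.
 */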
3003 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
3004                                       struct btrfs_root *root,
3005                                       struct btrfs_path *path,
3006                                       u64 objectid, u64 offset, u64 bytenr)
3007 {
3008         struct btrfs_delayed_ref_head *head;
3009         struct btrfs_delayed_ref_node *ref;
3010         struct btrfs_delayed_data_ref *data_ref;
3011         struct btrfs_delayed_ref_root *delayed_refs;
3012         int ret = 0;
3013
3014         delayed_refs = &trans->transaction->delayed_refs;
3015         spin_lock(&delayed_refs->lock);
3016         head = btrfs_find_delayed_ref_head(trans, bytenr);
3017         if (!head) {
3018                 spin_unlock(&delayed_refs->lock);
3019                 return 0;
3020         }
3021
3022         if (!mutex_trylock(&head->mutex)) {
3023                 atomic_inc(&head->node.refs);
3024                 spin_unlock(&delayed_refs->lock);
3025
3026                 btrfs_release_path(path);
3027
3028                 /*
3029                  * Mutex was contended, block until it's released and let
3030                  * caller try again
3031                  */
3032                 mutex_lock(&head->mutex);
3033                 mutex_unlock(&head->mutex);
3034                 btrfs_put_delayed_ref(&head->node);
3035                 return -EAGAIN;
3036         }
3037         spin_unlock(&delayed_refs->lock);
3038
3039         spin_lock(&head->lock);
3040         list_for_each_entry(ref, &head->ref_list, list) {
3041                 /* If it's a shared ref we know a cross reference exists */
3042                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
3043                         ret = 1;
3044                         break;
3045                 }
3046
3047                 data_ref = btrfs_delayed_node_to_data_ref(ref);
3048
3049                 /*
3050                  * If our ref doesn't match the one we're currently looking at
3051                  * then we have a cross reference.
3052                  */
3053                 if (data_ref->root != root->root_key.objectid ||
3054                     data_ref->objectid != objectid ||
3055                     data_ref->offset != offset) {
3056                         ret = 1;
3057                         break;
3058                 }
3059         }
3060         spin_unlock(&head->lock);
3061         mutex_unlock(&head->mutex);
3062         return ret;
3063 }
3064
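/*
 * Check the committed extent tree for references to @bytenr.  Returns 0
 * only when the single inline data ref owned by (root, objectid, offset)
 * is provably the only reference and the extent is newer than the last
 * snapshot; returns 1 or -ENOENT when a cross reference may exist.
 */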
3065 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
3066                                         struct btrfs_root *root,
3067                                         struct btrfs_path *path,
3068                                         u64 objectid, u64 offset, u64 bytenr)
3069 {
3070         struct btrfs_root *extent_root = root->fs_info->extent_root;
3071         struct extent_buffer *leaf;
3072         struct btrfs_extent_data_ref *ref;
3073         struct btrfs_extent_inline_ref *iref;
3074         struct btrfs_extent_item *ei;
3075         struct btrfs_key key;
3076         u32 item_size;
3077         int ret;
3078
3079         key.objectid = bytenr;
3080         key.offset = (u64)-1;
3081         key.type = BTRFS_EXTENT_ITEM_KEY;
3082
3083         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3084         if (ret < 0)
3085                 goto out;
3086         BUG_ON(ret == 0); /* Corruption */
3087
3088         ret = -ENOENT;
3089         if (path->slots[0] == 0)
3090                 goto out;
3091
3092         path->slots[0]--;
3093         leaf = path->nodes[0];
3094         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3095
3096         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
3097                 goto out;
3098
3099         ret = 1;
3100         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3101 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3102         if (item_size < sizeof(*ei)) {
3103                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
3104                 goto out;
3105         }
3106 #endif
3107         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
3108
3109         if (item_size != sizeof(*ei) +
3110             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
3111                 goto out;
3112
3113         if (btrfs_extent_generation(leaf, ei) <=
3114             btrfs_root_last_snapshot(&root->root_item))
3115                 goto out;
3116
3117         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
3118         if (btrfs_extent_inline_ref_type(leaf, iref) !=
3119             BTRFS_EXTENT_DATA_REF_KEY)
3120                 goto out;
3121
3122         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
3123         if (btrfs_extent_refs(leaf, ei) !=
3124             btrfs_extent_data_ref_count(leaf, ref) ||
3125             btrfs_extent_data_ref_root(leaf, ref) !=
3126             root->root_key.objectid ||
3127             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3128             btrfs_extent_data_ref_offset(leaf, ref) != offset)
3129                 goto out;
3130
3131         ret = 0;
3132 out:
3133         return ret;
3134 }
3135
3136 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3137                           struct btrfs_root *root,
3138                           u64 objectid, u64 offset, u64 bytenr)
3139 {
3140         struct btrfs_path *path;
3141         int ret;
3142         int ret2;
3143
3144         path = btrfs_alloc_path();
3145         if (!path)
3146                 return -ENOMEM;
3147
3148         do {
3149                 ret = check_committed_ref(trans, root, path, objectid,
3150                                           offset, bytenr);
3151                 if (ret && ret != -ENOENT)
3152                         goto out;
3153
3154                 ret2 = check_delayed_ref(trans, root, path, objectid,
3155                                          offset, bytenr);
3156         } while (ret2 == -EAGAIN);
3157
3158         if (ret2 && ret2 != -ENOENT) {
3159                 ret = ret2;
3160                 goto out;
3161         }
3162
3163         if (ret != -ENOENT || ret2 != -ENOENT)
3164                 ret = 0;
3165 out:
3166         btrfs_free_path(path);
3167         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3168                 WARN_ON(ret > 0);
3169         return ret;
3170 }
3171
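/*
 * Walk every item in @buf and add (inc == 1) or drop (inc == 0) one
 * reference for each file extent (in a leaf) or child block pointer (in a
 * node).  @full_backref selects whether the refs are recorded against the
 * block itself (shared back refs) or against the owning root.
 */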
3172 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3173                            struct btrfs_root *root,
3174                            struct extent_buffer *buf,
3175                            int full_backref, int inc)
3176 {
3177         u64 bytenr;
3178         u64 num_bytes;
3179         u64 parent;
3180         u64 ref_root;
3181         u32 nritems;
3182         struct btrfs_key key;
3183         struct btrfs_file_extent_item *fi;
3184         int i;
3185         int level;
3186         int ret = 0;
3187         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3188                             u64, u64, u64, u64, u64, u64);
3189
3190
3191         if (btrfs_test_is_dummy_root(root))
3192                 return 0;
3193
3194         ref_root = btrfs_header_owner(buf);
3195         nritems = btrfs_header_nritems(buf);
3196         level = btrfs_header_level(buf);
3197
3198         if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3199                 return 0;
3200
3201         if (inc)
3202                 process_func = btrfs_inc_extent_ref;
3203         else
3204                 process_func = btrfs_free_extent;
3205
3206         if (full_backref)
3207                 parent = buf->start;
3208         else
3209                 parent = 0;
3210
3211         for (i = 0; i < nritems; i++) {
3212                 if (level == 0) {
3213                         btrfs_item_key_to_cpu(buf, &key, i);
3214                         if (key.type != BTRFS_EXTENT_DATA_KEY)
3215                                 continue;
3216                         fi = btrfs_item_ptr(buf, i,
3217                                             struct btrfs_file_extent_item);
3218                         if (btrfs_file_extent_type(buf, fi) ==
3219                             BTRFS_FILE_EXTENT_INLINE)
3220                                 continue;
3221                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3222                         if (bytenr == 0)
3223                                 continue;
3224
3225                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3226                         key.offset -= btrfs_file_extent_offset(buf, fi);
3227                         ret = process_func(trans, root, bytenr, num_bytes,
3228                                            parent, ref_root, key.objectid,
3229                                            key.offset);
3230                         if (ret)
3231                                 goto fail;
3232                 } else {
3233                         bytenr = btrfs_node_blockptr(buf, i);
3234                         num_bytes = root->nodesize;
3235                         ret = process_func(trans, root, bytenr, num_bytes,
3236                                            parent, ref_root, level - 1, 0);
3237                         if (ret)
3238                                 goto fail;
3239                 }
3240         }
3241         return 0;
3242 fail:
3243         return ret;
3244 }
3245
3246 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3247                   struct extent_buffer *buf, int full_backref)
3248 {
3249         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3250 }
3251
3252 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3253                   struct extent_buffer *buf, int full_backref)
3254 {
3255         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3256 }
3257
3258 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3259                                  struct btrfs_root *root,
3260                                  struct btrfs_path *path,
3261                                  struct btrfs_block_group_cache *cache)
3262 {
3263         int ret;
3264         struct btrfs_root *extent_root = root->fs_info->extent_root;
3265         unsigned long bi;
3266         struct extent_buffer *leaf;
3267
3268         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3269         if (ret) {
3270                 if (ret > 0)
3271                         ret = -ENOENT;
3272                 goto fail;
3273         }
3274
3275         leaf = path->nodes[0];
3276         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3277         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3278         btrfs_mark_buffer_dirty(leaf);
3279 fail:
3280         btrfs_release_path(path);
3281         return ret;
3282
3283 }
3284
3285 static struct btrfs_block_group_cache *
3286 next_block_group(struct btrfs_root *root,
3287                  struct btrfs_block_group_cache *cache)
3288 {
3289         struct rb_node *node;
3290
3291         spin_lock(&root->fs_info->block_group_cache_lock);
3292
3293         /* If our block group was removed, we need a full search. */
3294         if (RB_EMPTY_NODE(&cache->cache_node)) {
3295                 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3296
3297                 spin_unlock(&root->fs_info->block_group_cache_lock);
3298                 btrfs_put_block_group(cache);
3299                 cache = btrfs_lookup_first_block_group(root->fs_info,
3300                                                        next_bytenr);
3301                 return cache;
3302         }
3303         node = rb_next(&cache->cache_node);
3304         btrfs_put_block_group(cache);
3305         if (node) {
3306                 cache = rb_entry(node, struct btrfs_block_group_cache,
3307                                  cache_node);
3308                 btrfs_get_block_group(cache);
3309         } else
3310                 cache = NULL;
3311         spin_unlock(&root->fs_info->block_group_cache_lock);
3312         return cache;
3313 }
3314
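/*
 * Prepare the free space cache inode for @block_group in this transaction:
 * create the inode if needed, truncate stale contents, preallocate room for
 * the new cache and record the resulting disk_cache_state on the block
 * group.
 */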
3315 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3316                             struct btrfs_trans_handle *trans,
3317                             struct btrfs_path *path)
3318 {
3319         struct btrfs_root *root = block_group->fs_info->tree_root;
3320         struct inode *inode = NULL;
3321         u64 alloc_hint = 0;
3322         int dcs = BTRFS_DC_ERROR;
3323         u64 num_pages = 0;
3324         int retries = 0;
3325         int ret = 0;
3326
3327         /*
3328          * If this block group is smaller than 100 megs, don't bother caching the
3329          * block group.
3330          */
3331         if (block_group->key.offset < (100 * 1024 * 1024)) {
3332                 spin_lock(&block_group->lock);
3333                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3334                 spin_unlock(&block_group->lock);
3335                 return 0;
3336         }
3337
3338         if (trans->aborted)
3339                 return 0;
3340 again:
3341         inode = lookup_free_space_inode(root, block_group, path);
3342         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3343                 ret = PTR_ERR(inode);
3344                 btrfs_release_path(path);
3345                 goto out;
3346         }
3347
3348         if (IS_ERR(inode)) {
3349                 BUG_ON(retries);
3350                 retries++;
3351
3352                 if (block_group->ro)
3353                         goto out_free;
3354
3355                 ret = create_free_space_inode(root, trans, block_group, path);
3356                 if (ret)
3357                         goto out_free;
3358                 goto again;
3359         }
3360
3361         /* We've already set up this transaction, go ahead and exit */
3362         if (block_group->cache_generation == trans->transid &&
3363             i_size_read(inode)) {
3364                 dcs = BTRFS_DC_SETUP;
3365                 goto out_put;
3366         }
3367
3368         /*
3369          * We want to set the generation to 0, that way if anything goes wrong
3370          * from here on out we know not to trust this cache when we load up next
3371          * time.
3372          */
3373         BTRFS_I(inode)->generation = 0;
3374         ret = btrfs_update_inode(trans, root, inode);
3375         if (ret) {
3376                 /*
3377                  * Theoretically we could recover from this by simply setting the
3378                  * super cache generation to 0 so we know to invalidate the
3379                  * cache, but then we'd have to keep track of the block groups
3380                  * that fail this way so we know we _have_ to reset this cache
3381                  * before the next commit or risk reading stale cache.  So to
3382                  * limit our exposure to horrible edge cases, let's just abort
3383                  * the transaction; this only happens in really bad situations
3384                  * anyway.
3385                  */
3386                 btrfs_abort_transaction(trans, root, ret);
3387                 goto out_put;
3388         }
3389         WARN_ON(ret);
3390
3391         if (i_size_read(inode) > 0) {
3392                 ret = btrfs_check_trunc_cache_free_space(root,
3393                                         &root->fs_info->global_block_rsv);
3394                 if (ret)
3395                         goto out_put;
3396
3397                 ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
3398                 if (ret)
3399                         goto out_put;
3400         }
3401
3402         spin_lock(&block_group->lock);
3403         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3404             !btrfs_test_opt(root, SPACE_CACHE)) {
3405                 /*
3406                  * don't bother trying to write stuff out _if_
3407                  * a) we're not cached,
3408                  * b) we're mounted with the nospace_cache option.
3409                  */
3410                 dcs = BTRFS_DC_WRITTEN;
3411                 spin_unlock(&block_group->lock);
3412                 goto out_put;
3413         }
3414         spin_unlock(&block_group->lock);
3415
3416         /*
3417          * We already hit ENOSPC while setting up a cache in this transaction;
3418          * just skip doing the setup, we've already cleared the cache so we're safe.
3419          */
3420         if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
3421                 ret = -ENOSPC;
3422                 goto out_put;
3423         }
3424
3425         /*
3426          * Try to preallocate enough space based on how big the block group is.
3427          * Keep in mind this has to include any pinned space which could end up
3428          * taking up quite a bit since it's not folded into the other space
3429          * cache.
3430          */
3431         num_pages = div_u64(block_group->key.offset, 256 * 1024 * 1024);
3432         if (!num_pages)
3433                 num_pages = 1;
3434
3435         num_pages *= 16;
3436         num_pages *= PAGE_CACHE_SIZE;
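        /*
         * e.g. a 1GiB block group gives div_u64(1GiB, 256MiB) = 4, so we
         * preallocate 4 * 16 = 64 pages, i.e. 256KiB with 4KiB pages.
         */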
3437
3438         ret = btrfs_check_data_free_space(inode, 0, num_pages);
3439         if (ret)
3440                 goto out_put;
3441
3442         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3443                                               num_pages, num_pages,
3444                                               &alloc_hint);
3445         /*
3446          * Our cache requires contiguous chunks so that we don't modify a bunch
3447          * of metadata or split extents when writing the cache out, which means
3448                  * we can hit ENOSPC if we are heavily fragmented in addition to just normal
3449          * out of space conditions.  So if we hit this just skip setting up any
3450          * other block groups for this transaction, maybe we'll unpin enough
3451          * space the next time around.
3452          */
3453         if (!ret)
3454                 dcs = BTRFS_DC_SETUP;
3455         else if (ret == -ENOSPC)
3456                 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
3457         btrfs_free_reserved_data_space(inode, 0, num_pages);
3458
3459 out_put:
3460         iput(inode);
3461 out_free:
3462         btrfs_release_path(path);
3463 out:
3464         spin_lock(&block_group->lock);
3465         if (!ret && dcs == BTRFS_DC_SETUP)
3466                 block_group->cache_generation = trans->transid;
3467         block_group->disk_cache_state = dcs;
3468         spin_unlock(&block_group->lock);
3469
3470         return ret;
3471 }
3472
3473 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3474                             struct btrfs_root *root)
3475 {
3476         struct btrfs_block_group_cache *cache, *tmp;
3477         struct btrfs_transaction *cur_trans = trans->transaction;
3478         struct btrfs_path *path;
3479
3480         if (list_empty(&cur_trans->dirty_bgs) ||
3481             !btrfs_test_opt(root, SPACE_CACHE))
3482                 return 0;
3483
3484         path = btrfs_alloc_path();
3485         if (!path)
3486                 return -ENOMEM;
3487
3488         /* Could add new block groups, use _safe just in case */
3489         list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3490                                  dirty_list) {
3491                 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3492                         cache_save_setup(cache, trans, path);
3493         }
3494
3495         btrfs_free_path(path);
3496         return 0;
3497 }
3498
3499 /*
3500  * transaction commit does final block group cache writeback during a
3501  * critical section where nothing is allowed to change the FS.  This is
3502  * required in order for the cache to actually match the block group,
3503  * but can introduce a lot of latency into the commit.
3504  *
3505  * So, btrfs_start_dirty_block_groups is here to kick off block group
3506  * cache IO.  There's a chance we'll have to redo some of it if the
3507  * block group changes again during the commit, but it greatly reduces
3508  * the commit latency by getting rid of the easy block groups while
3509  * we're still allowing others to join the commit.
3510  */
3511 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
3512                                    struct btrfs_root *root)
3513 {
3514         struct btrfs_block_group_cache *cache;
3515         struct btrfs_transaction *cur_trans = trans->transaction;
3516         int ret = 0;
3517         int should_put;
3518         struct btrfs_path *path = NULL;
3519         LIST_HEAD(dirty);
3520         struct list_head *io = &cur_trans->io_bgs;
3521         int num_started = 0;
3522         int loops = 0;
3523
3524         spin_lock(&cur_trans->dirty_bgs_lock);
3525         if (list_empty(&cur_trans->dirty_bgs)) {
3526                 spin_unlock(&cur_trans->dirty_bgs_lock);
3527                 return 0;
3528         }
3529         list_splice_init(&cur_trans->dirty_bgs, &dirty);
3530         spin_unlock(&cur_trans->dirty_bgs_lock);
3531
3532 again:
3533         /*
3534          * make sure all the block groups on our dirty list actually
3535          * exist
3536          */
3537         btrfs_create_pending_block_groups(trans, root);
3538
3539         if (!path) {
3540                 path = btrfs_alloc_path();
3541                 if (!path)
3542                         return -ENOMEM;
3543         }
3544
3545         /*
3546          * cache_write_mutex is here only to save us from balance or automatic
3547          * removal of empty block groups deleting this block group while we are
3548          * writing out the cache
3549          */
3550         mutex_lock(&trans->transaction->cache_write_mutex);
3551         while (!list_empty(&dirty)) {
3552                 cache = list_first_entry(&dirty,
3553                                          struct btrfs_block_group_cache,
3554                                          dirty_list);
3555                 /*
3556                  * this can happen if something re-dirties a block
3557                  * group that is already under IO.  Just wait for it to
3558                  * finish and then do it all again
3559                  */
3560                 if (!list_empty(&cache->io_list)) {
3561                         list_del_init(&cache->io_list);
3562                         btrfs_wait_cache_io(root, trans, cache,
3563                                             &cache->io_ctl, path,
3564                                             cache->key.objectid);
3565                         btrfs_put_block_group(cache);
3566                 }
3567
3568
3569                 /*
3570                  * btrfs_wait_cache_io uses the cache->dirty_list to decide
3571                  * if it should update the cache_state.  Don't delete
3572                  * until after we wait.
3573                  *
3574                  * Since we're not running in the commit critical section
3575                  * we need the dirty_bgs_lock to protect from update_block_group
3576                  */
3577                 spin_lock(&cur_trans->dirty_bgs_lock);
3578                 list_del_init(&cache->dirty_list);
3579                 spin_unlock(&cur_trans->dirty_bgs_lock);
3580
3581                 should_put = 1;
3582
3583                 cache_save_setup(cache, trans, path);
3584
3585                 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3586                         cache->io_ctl.inode = NULL;
3587                         ret = btrfs_write_out_cache(root, trans, cache, path);
3588                         if (ret == 0 && cache->io_ctl.inode) {
3589                                 num_started++;
3590                                 should_put = 0;
3591
3592                                 /*
3593                                  * the cache_write_mutex is protecting
3594                                  * the io_list
3595                                  */
3596                                 list_add_tail(&cache->io_list, io);
3597                         } else {
3598                                 /*
3599                                  * if we failed to write the cache, the
3600                                  * generation will be bad and life goes on
3601                                  */
3602                                 ret = 0;
3603                         }
3604                 }
3605                 if (!ret) {
3606                         ret = write_one_cache_group(trans, root, path, cache);
3607                         /*
3608                          * Our block group might still be attached to the list
3609                          * of new block groups in the transaction handle of some
3610                          * other task (struct btrfs_trans_handle->new_bgs). This
3611                          * means its block group item isn't yet in the extent
3612                          * tree. If this happens ignore the error, as we will
3613                          * try again later in the critical section of the
3614                          * transaction commit.
3615                          */
3616                         if (ret == -ENOENT) {
3617                                 ret = 0;
3618                                 spin_lock(&cur_trans->dirty_bgs_lock);
3619                                 if (list_empty(&cache->dirty_list)) {
3620                                         list_add_tail(&cache->dirty_list,
3621                                                       &cur_trans->dirty_bgs);
3622                                         btrfs_get_block_group(cache);
3623                                 }
3624                                 spin_unlock(&cur_trans->dirty_bgs_lock);
3625                         } else if (ret) {
3626                                 btrfs_abort_transaction(trans, root, ret);
3627                         }
3628                 }
3629
3630                 /* if it's not on the io list, we need to put the block group */
3631                 if (should_put)
3632                         btrfs_put_block_group(cache);
3633
3634                 if (ret)
3635                         break;
3636
3637                 /*
3638                  * Avoid blocking other tasks for too long. It might even save
3639                  * us from writing caches for block groups that are going to be
3640                  * removed.
3641                  */
3642                 mutex_unlock(&trans->transaction->cache_write_mutex);
3643                 mutex_lock(&trans->transaction->cache_write_mutex);
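                /*
                 * The unlock/lock pair above gives any task waiting on
                 * cache_write_mutex a chance to take it between block
                 * groups.
                 */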
3644         }
3645         mutex_unlock(&trans->transaction->cache_write_mutex);
3646
3647         /*
3648          * go through delayed refs for all the stuff we've just kicked off
3649          * and then loop back (just once)
3650          */
3651         ret = btrfs_run_delayed_refs(trans, root, 0);
3652         if (!ret && loops == 0) {
3653                 loops++;
3654                 spin_lock(&cur_trans->dirty_bgs_lock);
3655                 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3656                 /*
3657                  * dirty_bgs_lock protects us from concurrent block group
3658                  * deletes too (not just cache_write_mutex).
3659                  */
3660                 if (!list_empty(&dirty)) {
3661                         spin_unlock(&cur_trans->dirty_bgs_lock);
3662                         goto again;
3663                 }
3664                 spin_unlock(&cur_trans->dirty_bgs_lock);
3665         }
3666
3667         btrfs_free_path(path);
3668         return ret;
3669 }
3670
3671 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3672                                    struct btrfs_root *root)
3673 {
3674         struct btrfs_block_group_cache *cache;
3675         struct btrfs_transaction *cur_trans = trans->transaction;
3676         int ret = 0;
3677         int should_put;
3678         struct btrfs_path *path;
3679         struct list_head *io = &cur_trans->io_bgs;
3680         int num_started = 0;
3681
3682         path = btrfs_alloc_path();
3683         if (!path)
3684                 return -ENOMEM;
3685
3686         /*
3687          * We don't need the lock here since we are protected by the transaction
3688          * commit.  We want to do the cache_save_setup first and then run the
3689          * delayed refs to make sure we have the best chance at doing this all
3690          * in one shot.
3691          */
3692         while (!list_empty(&cur_trans->dirty_bgs)) {
3693                 cache = list_first_entry(&cur_trans->dirty_bgs,
3694                                          struct btrfs_block_group_cache,
3695                                          dirty_list);
3696
3697                 /*
3698                  * this can happen if cache_save_setup re-dirties a block
3699                  * group that is already under IO.  Just wait for it to
3700                  * finish and then do it all again
3701                  */
3702                 if (!list_empty(&cache->io_list)) {
3703                         list_del_init(&cache->io_list);
3704                         btrfs_wait_cache_io(root, trans, cache,
3705                                             &cache->io_ctl, path,
3706                                             cache->key.objectid);
3707                         btrfs_put_block_group(cache);
3708                 }
3709
3710                 /*
3711                  * don't remove from the dirty list until after we've waited
3712                  * on any pending IO
3713                  */
3714                 list_del_init(&cache->dirty_list);
3715                 should_put = 1;
3716
3717                 cache_save_setup(cache, trans, path);
3718
3719                 if (!ret)
3720                         ret = btrfs_run_delayed_refs(trans, root, (unsigned long) -1);
3721
3722                 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3723                         cache->io_ctl.inode = NULL;
3724                         ret = btrfs_write_out_cache(root, trans, cache, path);
3725                         if (ret == 0 && cache->io_ctl.inode) {
3726                                 num_started++;
3727                                 should_put = 0;
3728                                 list_add_tail(&cache->io_list, io);
3729                         } else {
3730                                 /*
3731                                  * if we failed to write the cache, the
3732                                  * generation will be bad and life goes on
3733                                  */
3734                                 ret = 0;
3735                         }
3736                 }
3737                 if (!ret) {
3738                         ret = write_one_cache_group(trans, root, path, cache);
3739                         if (ret)
3740                                 btrfs_abort_transaction(trans, root, ret);
3741                 }
3742
3743                 /* if it's not on the io list, we need to put the block group */
3744                 if (should_put)
3745                         btrfs_put_block_group(cache);
3746         }
3747
3748         while (!list_empty(io)) {
3749                 cache = list_first_entry(io, struct btrfs_block_group_cache,
3750                                          io_list);
3751                 list_del_init(&cache->io_list);
3752                 btrfs_wait_cache_io(root, trans, cache,
3753                                     &cache->io_ctl, path, cache->key.objectid);
3754                 btrfs_put_block_group(cache);
3755         }
3756
3757         btrfs_free_path(path);
3758         return ret;
3759 }
3760
3761 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3762 {
3763         struct btrfs_block_group_cache *block_group;
3764         int readonly = 0;
3765
3766         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3767         if (!block_group || block_group->ro)
3768                 readonly = 1;
3769         if (block_group)
3770                 btrfs_put_block_group(block_group);
3771         return readonly;
3772 }
3773
3774 static const char *alloc_name(u64 flags)
3775 {
3776         switch (flags) {
3777         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3778                 return "mixed";
3779         case BTRFS_BLOCK_GROUP_METADATA:
3780                 return "metadata";
3781         case BTRFS_BLOCK_GROUP_DATA:
3782                 return "data";
3783         case BTRFS_BLOCK_GROUP_SYSTEM:
3784                 return "system";
3785         default:
3786                 WARN_ON(1);
3787                 return "invalid-combination";
3788         }
3789 }
3790
3791 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3792                              u64 total_bytes, u64 bytes_used,
3793                              struct btrfs_space_info **space_info)
3794 {
3795         struct btrfs_space_info *found;
3796         int i;
3797         int factor;
3798         int ret;
3799
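        /*
         * DUP, RAID1 and RAID10 store two copies of every byte, so raw disk
         * usage is twice the logical usage.
         */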
3800         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3801                      BTRFS_BLOCK_GROUP_RAID10))
3802                 factor = 2;
3803         else
3804                 factor = 1;
3805
3806         found = __find_space_info(info, flags);
3807         if (found) {
3808                 spin_lock(&found->lock);
3809                 found->total_bytes += total_bytes;
3810                 found->disk_total += total_bytes * factor;
3811                 found->bytes_used += bytes_used;
3812                 found->disk_used += bytes_used * factor;
3813                 if (total_bytes > 0)
3814                         found->full = 0;
3815                 spin_unlock(&found->lock);
3816                 *space_info = found;
3817                 return 0;
3818         }
3819         found = kzalloc(sizeof(*found), GFP_NOFS);
3820         if (!found)
3821                 return -ENOMEM;
3822
3823         ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
3824         if (ret) {
3825                 kfree(found);
3826                 return ret;
3827         }
3828
3829         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3830                 INIT_LIST_HEAD(&found->block_groups[i]);
3831         init_rwsem(&found->groups_sem);
3832         spin_lock_init(&found->lock);
3833         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3834         found->total_bytes = total_bytes;
3835         found->disk_total = total_bytes * factor;
3836         found->bytes_used = bytes_used;
3837         found->disk_used = bytes_used * factor;
3838         found->bytes_pinned = 0;
3839         found->bytes_reserved = 0;
3840         found->bytes_readonly = 0;
3841         found->bytes_may_use = 0;
3842         found->full = 0;
3843         found->max_extent_size = 0;
3844         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3845         found->chunk_alloc = 0;
3846         found->flush = 0;
3847         init_waitqueue_head(&found->wait);
3848         INIT_LIST_HEAD(&found->ro_bgs);
3849
3850         ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3851                                     info->space_info_kobj, "%s",
3852                                     alloc_name(found->flags));
3853         if (ret) {
3854                 kfree(found);
3855                 return ret;
3856         }
3857
3858         *space_info = found;
3859         list_add_rcu(&found->list, &info->space_info);
3860         if (flags & BTRFS_BLOCK_GROUP_DATA)
3861                 info->data_sinfo = found;
3862
3863         return ret;
3864 }
3865
3866 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3867 {
3868         u64 extra_flags = chunk_to_extended(flags) &
3869                                 BTRFS_EXTENDED_PROFILE_MASK;
3870
3871         write_seqlock(&fs_info->profiles_lock);
3872         if (flags & BTRFS_BLOCK_GROUP_DATA)
3873                 fs_info->avail_data_alloc_bits |= extra_flags;
3874         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3875                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3876         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3877                 fs_info->avail_system_alloc_bits |= extra_flags;
3878         write_sequnlock(&fs_info->profiles_lock);
3879 }
3880
3881 /*
3882  * returns target flags in extended format or 0 if restripe for this
3883  * chunk_type is not in progress
3884  *
3885  * should be called with either volume_mutex or balance_lock held
3886  */
3887 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3888 {
3889         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3890         u64 target = 0;
3891
3892         if (!bctl)
3893                 return 0;
3894
3895         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3896             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3897                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3898         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3899                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3900                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3901         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3902                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3903                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3904         }
3905
3906         return target;
3907 }
3908
3909 /*
3910  * @flags: available profiles in extended format (see ctree.h)
3911  *
3912  * Returns reduced profile in chunk format.  If profile changing is in
3913  * progress (either running or paused) picks the target profile (if it's
3914  * already available), otherwise falls back to plain reducing.
3915  */
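/*
 * For example, flags = DATA | RAID1 | RAID10 on a filesystem with four rw
 * devices (and no restripe target) reduces to DATA | RAID10, the highest
 * profile in the preference order below that the device count allows.
 */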
3916 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3917 {
3918         u64 num_devices = root->fs_info->fs_devices->rw_devices;
3919         u64 target;
3920         u64 raid_type;
3921         u64 allowed = 0;
3922
3923         /*
3924          * see if restripe for this chunk_type is in progress, if so
3925          * try to reduce to the target profile
3926          */
3927         spin_lock(&root->fs_info->balance_lock);
3928         target = get_restripe_target(root->fs_info, flags);
3929         if (target) {
3930                 /* pick target profile only if it's already available */
3931                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3932                         spin_unlock(&root->fs_info->balance_lock);
3933                         return extended_to_chunk(target);
3934                 }
3935         }
3936         spin_unlock(&root->fs_info->balance_lock);
3937
3938         /* First, mask out the RAID levels which aren't possible */
3939         for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3940                 if (num_devices >= btrfs_raid_array[raid_type].devs_min)
3941                         allowed |= btrfs_raid_group[raid_type];
3942         }
3943         allowed &= flags;
3944
3945         if (allowed & BTRFS_BLOCK_GROUP_RAID6)
3946                 allowed = BTRFS_BLOCK_GROUP_RAID6;
3947         else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
3948                 allowed = BTRFS_BLOCK_GROUP_RAID5;
3949         else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
3950                 allowed = BTRFS_BLOCK_GROUP_RAID10;
3951         else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
3952                 allowed = BTRFS_BLOCK_GROUP_RAID1;
3953         else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
3954                 allowed = BTRFS_BLOCK_GROUP_RAID0;
3955
3956         flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
3957
3958         return extended_to_chunk(flags | allowed);
3959 }
3960
3961 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
3962 {
3963         unsigned seq;
3964         u64 flags;
3965
3966         do {
3967                 flags = orig_flags;
3968                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3969
3970                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3971                         flags |= root->fs_info->avail_data_alloc_bits;
3972                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3973                         flags |= root->fs_info->avail_system_alloc_bits;
3974                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3975                         flags |= root->fs_info->avail_metadata_alloc_bits;
3976         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3977
3978         return btrfs_reduce_alloc_profile(root, flags);
3979 }
3980
3981 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3982 {
3983         u64 flags;
3984         u64 ret;
3985
3986         if (data)
3987                 flags = BTRFS_BLOCK_GROUP_DATA;
3988         else if (root == root->fs_info->chunk_root)
3989                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3990         else
3991                 flags = BTRFS_BLOCK_GROUP_METADATA;
3992
3993         ret = get_alloc_profile(root, flags);
3994         return ret;
3995 }
3996
3997 int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes)
3998 {
3999         struct btrfs_space_info *data_sinfo;
4000         struct btrfs_root *root = BTRFS_I(inode)->root;
4001         struct btrfs_fs_info *fs_info = root->fs_info;
4002         u64 used;
4003         int ret = 0;
4004         int need_commit = 2;
4005         int have_pinned_space;
4006
4007         /* make sure bytes are sectorsize aligned */
4008         bytes = ALIGN(bytes, root->sectorsize);
4009
4010         if (btrfs_is_free_space_inode(inode)) {
4011                 need_commit = 0;
4012                 ASSERT(current->journal_info);
4013         }
4014
4015         data_sinfo = fs_info->data_sinfo;
4016         if (!data_sinfo)
4017                 goto alloc;
4018
4019 again:
4020         /* make sure we have enough space to handle the data first */
4021         spin_lock(&data_sinfo->lock);
4022         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
4023                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
4024                 data_sinfo->bytes_may_use;
4025
4026         if (used + bytes > data_sinfo->total_bytes) {
4027                 struct btrfs_trans_handle *trans;
4028
4029                 /*
4030                  * if we don't have enough free bytes in this space then we need
4031                  * to alloc a new chunk.
4032                  */
4033                 if (!data_sinfo->full) {
4034                         u64 alloc_target;
4035
4036                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
4037                         spin_unlock(&data_sinfo->lock);
4038 alloc:
4039                         alloc_target = btrfs_get_alloc_profile(root, 1);
4040                         /*
4041                          * It is ugly that we don't call a nolock join
4042                          * transaction for the free space inode case here.
4043                          * But it is safe because we only do the data space
4044                          * reservation for the free space cache in the
4045                          * transaction context: the common join transaction
4046                          * just increases the counter of the current
4047                          * transaction handle and doesn't try to acquire
4048                          * the trans_lock of the fs.
4049                          */
4050                         trans = btrfs_join_transaction(root);
4051                         if (IS_ERR(trans))
4052                                 return PTR_ERR(trans);
4053
4054                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4055                                              alloc_target,
4056                                              CHUNK_ALLOC_NO_FORCE);
4057                         btrfs_end_transaction(trans, root);
4058                         if (ret < 0) {
4059                                 if (ret != -ENOSPC)
4060                                         return ret;
4061                                 else {
4062                                         have_pinned_space = 1;
4063                                         goto commit_trans;
4064                                 }
4065                         }
4066
4067                         if (!data_sinfo)
4068                                 data_sinfo = fs_info->data_sinfo;
4069
4070                         goto again;
4071                 }
4072
4073                 /*
4074                  * If we don't have enough pinned space to deal with this
4075                  * allocation, and no chunk was removed in the current
4076                  * transaction, don't bother committing the transaction.
4077                  */
4078                 have_pinned_space = percpu_counter_compare(
4079                         &data_sinfo->total_bytes_pinned,
4080                         used + bytes - data_sinfo->total_bytes);
4081                 spin_unlock(&data_sinfo->lock);
4082
4083                 /* commit the current transaction and try again */
4084 commit_trans:
4085                 if (need_commit &&
4086                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
4087                         need_commit--;
4088
4089                         if (need_commit > 0)
4090                                 btrfs_wait_ordered_roots(fs_info, -1);
4091
4092                         trans = btrfs_join_transaction(root);
4093                         if (IS_ERR(trans))
4094                                 return PTR_ERR(trans);
4095                         if (have_pinned_space >= 0 ||
4096                             test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
4097                                      &trans->transaction->flags) ||
4098                             need_commit > 0) {
4099                                 ret = btrfs_commit_transaction(trans, root);
4100                                 if (ret)
4101                                         return ret;
4102                                 /*
4103                                  * make sure that all running delayed iputs are
4104                                  * done
4105                                  */
4106                                 down_write(&root->fs_info->delayed_iput_sem);
4107                                 up_write(&root->fs_info->delayed_iput_sem);
4108                                 goto again;
4109                         } else {
4110                                 btrfs_end_transaction(trans, root);
4111                         }
4112                 }
4113
4114                 trace_btrfs_space_reservation(root->fs_info,
4115                                               "space_info:enospc",
4116                                               data_sinfo->flags, bytes, 1);
4117                 return -ENOSPC;
4118         }
4119         data_sinfo->bytes_may_use += bytes;
4120         trace_btrfs_space_reservation(root->fs_info, "space_info",
4121                                       data_sinfo->flags, bytes, 1);
4122         spin_unlock(&data_sinfo->lock);
4123
4124         return ret;
4125 }
4126
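/*
 * A sketch of the admission check btrfs_alloc_data_chunk_ondemand() does
 * under data_sinfo->lock: every form of consumption is summed and the new
 * reservation must still fit under total_bytes. The trimmed struct below
 * only mirrors the fields used here and is not the kernel definition.
 */
#include <stdint.h>

struct sketch_space_info {
        uint64_t total_bytes;
        uint64_t bytes_used;
        uint64_t bytes_reserved;
        uint64_t bytes_pinned;
        uint64_t bytes_readonly;
        uint64_t bytes_may_use;
};

static int sketch_reservation_fits(const struct sketch_space_info *s,
                                   uint64_t bytes)
{
        uint64_t used = s->bytes_used + s->bytes_reserved +
                        s->bytes_pinned + s->bytes_readonly +
                        s->bytes_may_use;

        /* fits only if the sum of all consumers stays under the total */
        return used + bytes <= s->total_bytes;
}
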
4127 /*
4128  * New check_data_free_space() with the ability to do precise data
4129  * reservation. Will replace the old btrfs_check_data_free_space(), but
4130  * for patch splitting, add the new function first and then replace it.
4131  */
4132 int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
4133 {
4134         struct btrfs_root *root = BTRFS_I(inode)->root;
4135         int ret;
4136
4137         /* align the range */
4138         len = round_up(start + len, root->sectorsize) -
4139               round_down(start, root->sectorsize);
4140         start = round_down(start, root->sectorsize);
4141
4142         ret = btrfs_alloc_data_chunk_ondemand(inode, len);
4143         if (ret < 0)
4144                 return ret;
4145
4146         /*
4147          * Use the new btrfs_qgroup_reserve_data() to reserve precise data space
4148          *
4149          * TODO: Find a good method to avoid reserving data space for a NOCOW
4150          * range without hurting performance when quotas are disabled.
4151          */
4152         ret = btrfs_qgroup_reserve_data(inode, start, len);
4153         return ret;
4154 }
4155
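/*
 * A sketch of the sectorsize alignment above, assuming a 4096-byte
 * sectorsize: the reserved length must cover every sector that the byte
 * range [start, start + len) touches. The sketch_round_* macros mirror
 * the kernel's round_up()/round_down() for power-of-two alignments.
 */
#include <stdint.h>

#define SKETCH_SECTORSIZE       4096ULL
#define sketch_round_down(x, a) ((x) & ~((a) - 1))
#define sketch_round_up(x, a)   sketch_round_down((x) + (a) - 1, (a))

static void sketch_align_range(uint64_t *start, uint64_t *len)
{
        uint64_t end = sketch_round_up(*start + *len, SKETCH_SECTORSIZE);

        *start = sketch_round_down(*start, SKETCH_SECTORSIZE);
        /* e.g. start=5000, len=100 becomes start=4096, len=4096 */
        *len = end - *start;
}
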
4156 /*
4157  * Called if we need to clear a data reservation for this inode,
4158  * normally in an error case.
4159  *
4160  * This one will *NOT* use the accurate qgroup reserved space API; it is
4161  * only for callers that cannot sleep and are sure the qgroup reserved
4162  * space is unaffected, like clear_bit_hook().
4163  */
4164 void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
4165                                             u64 len)
4166 {
4167         struct btrfs_root *root = BTRFS_I(inode)->root;
4168         struct btrfs_space_info *data_sinfo;
4169
4170         /* Make sure the range is aligned to sectorsize */
4171         len = round_up(start + len, root->sectorsize) -
4172               round_down(start, root->sectorsize);
4173         start = round_down(start, root->sectorsize);
4174
4175         data_sinfo = root->fs_info->data_sinfo;
4176         spin_lock(&data_sinfo->lock);
4177         if (WARN_ON(data_sinfo->bytes_may_use < len))
4178                 data_sinfo->bytes_may_use = 0;
4179         else
4180                 data_sinfo->bytes_may_use -= len;
4181         trace_btrfs_space_reservation(root->fs_info, "space_info",
4182                                       data_sinfo->flags, len, 0);
4183         spin_unlock(&data_sinfo->lock);
4184 }
4185
4186 /*
4187  * Called if we need to clear a data reservation for this inode,
4188  * normally in an error case.
4189  *
4190  * This one will handle the per-inode data rsv map for the accurate
4191  * reserved space framework.
4192  */
4193 void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
4194 {
4195         btrfs_free_reserved_data_space_noquota(inode, start, len);
4196         btrfs_qgroup_free_data(inode, start, len);
4197 }
4198
4199 static void force_metadata_allocation(struct btrfs_fs_info *info)
4200 {
4201         struct list_head *head = &info->space_info;
4202         struct btrfs_space_info *found;
4203
4204         rcu_read_lock();
4205         list_for_each_entry_rcu(found, head, list) {
4206                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
4207                         found->force_alloc = CHUNK_ALLOC_FORCE;
4208         }
4209         rcu_read_unlock();
4210 }
4211
4212 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
4213 {
4214         return (global->size << 1);
4215 }
4216
4217 static int should_alloc_chunk(struct btrfs_root *root,
4218                               struct btrfs_space_info *sinfo, int force)
4219 {
4220         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4221         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
4222         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
4223         u64 thresh;
4224
4225         if (force == CHUNK_ALLOC_FORCE)
4226                 return 1;
4227
4228         /*
4229          * We need to take into account the global rsv because for all intents
4230          * and purposes it's used space.  Don't worry about locking the
4231          * global_rsv, it doesn't change except when the transaction commits.
4232          */
4233         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
4234                 num_allocated += calc_global_rsv_need_space(global_rsv);
4235
4236         /*
4237          * in limited mode, we want to have some free space up to
4238          * about 1% of the FS size.
4239          */
4240         if (force == CHUNK_ALLOC_LIMITED) {
4241                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
4242                 thresh = max_t(u64, 64 * 1024 * 1024,
4243                                div_factor_fine(thresh, 1));
4244
4245                 if (num_bytes - num_allocated < thresh)
4246                         return 1;
4247         }
4248
4249         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
4250                 return 0;
4251         return 1;
4252 }
4253
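/*
 * A sketch of the two thresholds should_alloc_chunk() applies: in
 * CHUNK_ALLOC_LIMITED mode, keep at least 1% of the filesystem (but no
 * less than 64MiB) free; in the default case, only allocate once usage
 * crosses roughly 80% (div_factor(num_bytes, 8) is num_bytes * 8 / 10).
 * Plain integer math stands in for the kernel's div_factor helpers.
 */
#include <stdint.h>

static int sketch_limited_should_alloc(uint64_t total, uint64_t free_bytes)
{
        uint64_t thresh = total / 100;          /* ~1% of the FS */

        if (thresh < 64ULL * 1024 * 1024)
                thresh = 64ULL * 1024 * 1024;   /* floor of 64MiB */
        return free_bytes < thresh;
}

static int sketch_default_should_alloc(uint64_t total, uint64_t allocated)
{
        /* allocate once used space (plus slack) reaches ~80% of the total */
        return allocated + 2ULL * 1024 * 1024 >= total * 8 / 10;
}
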
4254 static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
4255 {
4256         u64 num_dev;
4257
4258         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
4259                     BTRFS_BLOCK_GROUP_RAID0 |
4260                     BTRFS_BLOCK_GROUP_RAID5 |
4261                     BTRFS_BLOCK_GROUP_RAID6))
4262                 num_dev = root->fs_info->fs_devices->rw_devices;
4263         else if (type & BTRFS_BLOCK_GROUP_RAID1)
4264                 num_dev = 2;
4265         else
4266                 num_dev = 1;    /* DUP or single */
4267
4268         return num_dev;
4269 }
4270
4271 /*
4272  * Reserve space in the system space_info necessary for allocating or
4273  * removing a chunk; both operations update device items and add or
4274  * remove a chunk item in the chunk tree.
4275  */
4276 void check_system_chunk(struct btrfs_trans_handle *trans,
4277                         struct btrfs_root *root,
4278                         u64 type)
4279 {
4280         struct btrfs_space_info *info;
4281         u64 left;
4282         u64 thresh;
4283         int ret = 0;
4284         u64 num_devs;
4285
4286         /*
4287          * Needed because we can end up allocating a system chunk here and
4288          * the space reservation in the chunk block reserve has to be
4289          * atomic and race free.
4289          */
4290         ASSERT(mutex_is_locked(&root->fs_info->chunk_mutex));
4291
4292         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4293         spin_lock(&info->lock);
4294         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
4295                 info->bytes_reserved - info->bytes_readonly -
4296                 info->bytes_may_use;
4297         spin_unlock(&info->lock);
4298
4299         num_devs = get_profile_num_devs(root, type);
4300
4301         /* num_devs device items to update and 1 chunk item to add or remove */
4302         thresh = btrfs_calc_trunc_metadata_size(root, num_devs) +
4303                 btrfs_calc_trans_metadata_size(root, 1);
4304
4305         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
4306                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
4307                         left, thresh, type);
4308                 dump_space_info(info, 0, 0);
4309         }
4310
4311         if (left < thresh) {
4312                 u64 flags;
4313
4314                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
4315                 /*
4316                  * Ignore failure to create system chunk. We might end up not
4317                  * needing it, as we might not need to COW all nodes/leafs from
4318                  * the paths we visit in the chunk tree (they were already COWed
4319                  * or created in the current transaction for example).
4320                  */
4321                 ret = btrfs_alloc_chunk(trans, root, flags);
4322         }
4323
4324         if (!ret) {
4325                 ret = btrfs_block_rsv_add(root->fs_info->chunk_root,
4326                                           &root->fs_info->chunk_block_rsv,
4327                                           thresh, BTRFS_RESERVE_NO_FLUSH);
4328                 if (!ret)
4329                         trans->chunk_bytes_reserved += thresh;
4330         }
4331 }
4332
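/*
 * A sketch of the reservation sizing in check_system_chunk(): updating
 * num_devs device items plus adding or removing one chunk item each cost,
 * in the worst case, a COW of a full tree path. The per-item cost below
 * is a purely illustrative model of the btrfs_calc_*_metadata_size()
 * helpers; the real ones account for node size, tree level and splits.
 */
#include <stdint.h>

static uint64_t sketch_system_thresh(uint64_t nodesize, uint64_t levels,
                                     uint64_t num_devs)
{
        uint64_t per_item = nodesize * levels;  /* assumed worst-case COW cost */

        /* num_devs device items to update + 1 chunk item to add/remove */
        return per_item * num_devs + per_item;
}
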
4333 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
4334                           struct btrfs_root *extent_root, u64 flags, int force)
4335 {
4336         struct btrfs_space_info *space_info;
4337         struct btrfs_fs_info *fs_info = extent_root->fs_info;
4338         int wait_for_alloc = 0;
4339         int ret = 0;
4340
4341         /* Don't re-enter if we're already allocating a chunk */
4342         if (trans->allocating_chunk)
4343                 return -ENOSPC;
4344
4345         space_info = __find_space_info(extent_root->fs_info, flags);
4346         if (!space_info) {
4347                 ret = update_space_info(extent_root->fs_info, flags,
4348                                         0, 0, &space_info);
4349                 BUG_ON(ret); /* -ENOMEM */
4350         }
4351         BUG_ON(!space_info); /* Logic error */
4352
4353 again:
4354         spin_lock(&space_info->lock);
4355         if (force < space_info->force_alloc)
4356                 force = space_info->force_alloc;
4357         if (space_info->full) {
4358                 if (should_alloc_chunk(extent_root, space_info, force))
4359                         ret = -ENOSPC;
4360                 else
4361                         ret = 0;
4362                 spin_unlock(&space_info->lock);
4363                 return ret;
4364         }
4365
4366         if (!should_alloc_chunk(extent_root, space_info, force)) {
4367                 spin_unlock(&space_info->lock);
4368                 return 0;
4369         } else if (space_info->chunk_alloc) {
4370                 wait_for_alloc = 1;
4371         } else {
4372                 space_info->chunk_alloc = 1;
4373         }
4374
4375         spin_unlock(&space_info->lock);
4376
4377         mutex_lock(&fs_info->chunk_mutex);
4378
4379         /*
4380          * The chunk_mutex is held throughout the entirety of a chunk
4381          * allocation, so once we've acquired the chunk_mutex we know that the
4382          * other guy is done and we need to recheck and see if we should
4383          * allocate.
4384          */
4385         if (wait_for_alloc) {
4386                 mutex_unlock(&fs_info->chunk_mutex);
4387                 wait_for_alloc = 0;
4388                 goto again;
4389         }
4390
4391         trans->allocating_chunk = true;
4392
4393         /*
4394          * If we have mixed data/metadata chunks we want to make sure we keep
4395          * allocating mixed chunks instead of individual chunks.
4396          */
4397         if (btrfs_mixed_space_info(space_info))
4398                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
4399
4400         /*
4401          * if we're doing a data chunk, go ahead and make sure that
4402          * we keep a reasonable number of metadata chunks allocated in the
4403          * FS as well.
4404          */
4405         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
4406                 fs_info->data_chunk_allocations++;
4407                 if (!(fs_info->data_chunk_allocations %
4408                       fs_info->metadata_ratio))
4409                         force_metadata_allocation(fs_info);
4410         }
4411
4412         /*
4413          * Check if we have enough space in SYSTEM chunk because we may need
4414          * to update devices.
4415          */
4416         check_system_chunk(trans, extent_root, flags);
4417
4418         ret = btrfs_alloc_chunk(trans, extent_root, flags);
4419         trans->allocating_chunk = false;
4420
4421         spin_lock(&space_info->lock);
4422         if (ret < 0 && ret != -ENOSPC)
4423                 goto out;
4424         if (ret)
4425                 space_info->full = 1;
4426         else
4427                 ret = 1;
4428
4429         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4430 out:
4431         space_info->chunk_alloc = 0;
4432         spin_unlock(&space_info->lock);
4433         mutex_unlock(&fs_info->chunk_mutex);
4434         /*
4435          * When we allocate a new chunk we reserve space in the chunk block
4436          * reserve to make sure we can COW nodes/leafs in the chunk tree or
4437          * add new nodes/leafs to it if we end up needing to do it when
4438          * inserting the chunk item and updating device items as part of the
4439          * second phase of chunk allocation, performed by
4440          * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
4441          * large number of new block groups to create in our transaction
4442          * handle's new_bgs list to avoid exhausting the chunk block reserve
4443          * in extreme cases - like having a single transaction create many new
4444          * block groups when starting to write out the free space caches of all
4445          * the block groups that were made dirty during the lifetime of the
4446          * transaction.
4447          */
4448         if (trans->can_flush_pending_bgs &&
4449             trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
4450                 btrfs_create_pending_block_groups(trans, trans->root);
4451                 btrfs_trans_release_chunk_metadata(trans);
4452         }
4453         return ret;
4454 }
4455
4456 static int can_overcommit(struct btrfs_root *root,
4457                           struct btrfs_space_info *space_info, u64 bytes,
4458                           enum btrfs_reserve_flush_enum flush)
4459 {
4460         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4461         u64 profile = btrfs_get_alloc_profile(root, 0);
4462         u64 space_size;
4463         u64 avail;
4464         u64 used;
4465
4466         used = space_info->bytes_used + space_info->bytes_reserved +
4467                 space_info->bytes_pinned + space_info->bytes_readonly;
4468
4469         /*
4470          * We only want to allow over committing if we have lots of actual space
4471          * free, but if we don't have enough space to handle the global reserve
4472          * space then we could end up having a real enospc problem when trying
4473          * to allocate a chunk or some other such important allocation.
4474          */
4475         spin_lock(&global_rsv->lock);
4476         space_size = calc_global_rsv_need_space(global_rsv);
4477         spin_unlock(&global_rsv->lock);
4478         if (used + space_size >= space_info->total_bytes)
4479                 return 0;
4480
4481         used += space_info->bytes_may_use;
4482
4483         spin_lock(&root->fs_info->free_chunk_lock);
4484         avail = root->fs_info->free_chunk_space;
4485         spin_unlock(&root->fs_info->free_chunk_lock);
4486
4487         /*
4488          * If we have dup, raid1 or raid10 then only half of the free
4489          * space is actually usable.  For raid56, the space info used
4490          * doesn't include the parity drive, so we don't have to
4491          * change the math.
4492          */
4493         if (profile & (BTRFS_BLOCK_GROUP_DUP |
4494                        BTRFS_BLOCK_GROUP_RAID1 |
4495                        BTRFS_BLOCK_GROUP_RAID10))
4496                 avail >>= 1;
4497
4498         /*
4499          * If we aren't flushing all things, let us overcommit up to
4500          * 1/2 of the space. If we can flush, don't let us overcommit
4501          * too much; only allow up to 1/8 of the space.
4502          */
4503         if (flush == BTRFS_RESERVE_FLUSH_ALL)
4504                 avail >>= 3;
4505         else
4506                 avail >>= 1;
4507
4508         if (used + bytes < space_info->total_bytes + avail)
4509                 return 1;
4510         return 0;
4511 }
4512
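/*
 * A sketch of the overcommit arithmetic in can_overcommit(): mirrored
 * profiles (DUP/RAID1/RAID10) halve the usable free device space, and
 * the overcommit allowance is then 1/8 of that when a full flush is
 * permitted, 1/2 otherwise. Parameters are illustrative booleans rather
 * than the kernel's profile bits and flush enum.
 */
#include <stdint.h>

static uint64_t sketch_overcommit_room(uint64_t avail, int mirrored,
                                       int can_flush_all)
{
        if (mirrored)
                avail >>= 1;            /* only half the raw space is usable */

        /* flushable reservations may overcommit less aggressively */
        return can_flush_all ? avail >> 3 : avail >> 1;
}
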
4513 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
4514                                          unsigned long nr_pages, int nr_items)
4515 {
4516         struct super_block *sb = root->fs_info->sb;
4517
4518         if (down_read_trylock(&sb->s_umount)) {
4519                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4520                 up_read(&sb->s_umount);
4521         } else {
4522                 /*
4523                  * We needn't worry about the filesystem going from r/w to
4524                  * r/o even though we don't acquire the ->s_umount mutex,
4525                  * because the filesystem should guarantee that the delalloc
4526                  * inodes list is empty once it is read-only (all dirty pages
4527                  * have been written to disk).
4528                  */
4529                 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
4530                 if (!current->journal_info)
4531                         btrfs_wait_ordered_roots(root->fs_info, nr_items);
4532         }
4533 }
4534
4535 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
4536 {
4537         u64 bytes;
4538         int nr;
4539
4540         bytes = btrfs_calc_trans_metadata_size(root, 1);
4541         nr = (int)div64_u64(to_reclaim, bytes);
4542         if (!nr)
4543                 nr = 1;
4544         return nr;
4545 }
4546
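/*
 * A sketch of calc_reclaim_items_nr(): the reclaim target is expressed
 * as a number of metadata "items", each costing one single-item
 * transaction's worth of metadata, and at least one item is always
 * flushed. The 12KiB per-item figure is an assumed stand-in for
 * btrfs_calc_trans_metadata_size(root, 1).
 */
#include <stdint.h>

static int sketch_reclaim_items(uint64_t to_reclaim)
{
        uint64_t per_item = 12ULL * 1024;       /* assumed item footprint */
        int nr = (int)(to_reclaim / per_item);

        return nr ? nr : 1;                     /* never flush zero items */
}
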
4547 #define EXTENT_SIZE_PER_ITEM    (256 * 1024)
4548
4549 /*
4550  * shrink metadata reservation for delalloc
4551  */
4552 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4553                             bool wait_ordered)
4554 {
4555         struct btrfs_block_rsv *block_rsv;
4556         struct btrfs_space_info *space_info;
4557         struct btrfs_trans_handle *trans;
4558         u64 delalloc_bytes;
4559         u64 max_reclaim;
4560         long time_left;
4561         unsigned long nr_pages;
4562         int loops;
4563         int items;
4564         enum btrfs_reserve_flush_enum flush;
4565
4566         /* Calc the number of items we need to flush for this space reservation */
4567         items = calc_reclaim_items_nr(root, to_reclaim);
4568         to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4569
4570         trans = (struct btrfs_trans_handle *)current->journal_info;
4571         block_rsv = &root->fs_info->delalloc_block_rsv;
4572         space_info = block_rsv->space_info;
4573
4574         delalloc_bytes = percpu_counter_sum_positive(
4575                                                 &root->fs_info->delalloc_bytes);
4576         if (delalloc_bytes == 0) {
4577                 if (trans)
4578                         return;
4579                 if (wait_ordered)
4580                         btrfs_wait_ordered_roots(root->fs_info, items);
4581                 return;
4582         }
4583
4584         loops = 0;
4585         while (delalloc_bytes && loops < 3) {
4586                 max_reclaim = min(delalloc_bytes, to_reclaim);
4587                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4588                 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4589                 /*
4590                  * We need to wait for the async pages to actually start before
4591                  * we do anything.
4592                  */
4593                 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4594                 if (!max_reclaim)
4595                         goto skip_async;
4596
4597                 if (max_reclaim <= nr_pages)
4598                         max_reclaim = 0;
4599                 else
4600                         max_reclaim -= nr_pages;
4601
4602                 wait_event(root->fs_info->async_submit_wait,
4603                            atomic_read(&root->fs_info->async_delalloc_pages) <=
4604                            (int)max_reclaim);
4605 skip_async:
4606                 if (!trans)
4607                         flush = BTRFS_RESERVE_FLUSH_ALL;
4608                 else
4609                         flush = BTRFS_RESERVE_NO_FLUSH;
4610                 spin_lock(&space_info->lock);
4611                 if (can_overcommit(root, space_info, orig, flush)) {
4612                         spin_unlock(&space_info->lock);
4613                         break;
4614                 }
4615                 spin_unlock(&space_info->lock);
4616
4617                 loops++;
4618                 if (wait_ordered && !trans) {
4619                         btrfs_wait_ordered_roots(root->fs_info, items);
4620                 } else {
4621                         time_left = schedule_timeout_killable(1);
4622                         if (time_left)
4623                                 break;
4624                 }
4625                 delalloc_bytes = percpu_counter_sum_positive(
4626                                                 &root->fs_info->delalloc_bytes);
4627         }
4628 }
4629
4630 /**
4631  * may_commit_transaction - possibly commit the transaction if it's ok to
4632  * @root - the root we're allocating for
4633  * @space_info - the space_info we are trying to make a reservation in
4634  * @bytes - the number of bytes we want to reserve
4635  * @force - force the commit
4635  *
4636  * This will check to make sure that committing the transaction will actually
4637  * get us somewhere and then commit the transaction if it does.  Otherwise it
4638  * will return -ENOSPC.
4639  */
4640 static int may_commit_transaction(struct btrfs_root *root,
4641                                   struct btrfs_space_info *space_info,
4642                                   u64 bytes, int force)
4643 {
4644         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4645         struct btrfs_trans_handle *trans;
4646
4647         trans = (struct btrfs_trans_handle *)current->journal_info;
4648         if (trans)
4649                 return -EAGAIN;
4650
4651         if (force)
4652                 goto commit;
4653
4654         /* See if there is enough pinned space to make this reservation */
4655         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4656                                    bytes) >= 0)
4657                 goto commit;
4658
4659         /*
4660          * See if there is some space in the delayed insertion reservation for
4661          * this reservation.
4662          */
4663         if (space_info != delayed_rsv->space_info)
4664                 return -ENOSPC;
4665
4666         spin_lock(&delayed_rsv->lock);
4667         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4668                                    bytes - delayed_rsv->size) >= 0) {
4669                 spin_unlock(&delayed_rsv->lock);
4670                 return -ENOSPC;
4671         }
4672         spin_unlock(&delayed_rsv->lock);
4673
4674 commit:
4675         trans = btrfs_join_transaction(root);
4676         if (IS_ERR(trans))
4677                 return -ENOSPC;
4678
4679         return btrfs_commit_transaction(trans, root);
4680 }
4681
4682 enum flush_state {
4683         FLUSH_DELAYED_ITEMS_NR  =       1,
4684         FLUSH_DELAYED_ITEMS     =       2,
4685         FLUSH_DELALLOC          =       3,
4686         FLUSH_DELALLOC_WAIT     =       4,
4687         ALLOC_CHUNK             =       5,
4688         COMMIT_TRANS            =       6,
4689 };
4690
4691 static int flush_space(struct btrfs_root *root,
4692                        struct btrfs_space_info *space_info, u64 num_bytes,
4693                        u64 orig_bytes, int state)
4694 {
4695         struct btrfs_trans_handle *trans;
4696         int nr;
4697         int ret = 0;
4698
4699         switch (state) {
4700         case FLUSH_DELAYED_ITEMS_NR:
4701         case FLUSH_DELAYED_ITEMS:
4702                 if (state == FLUSH_DELAYED_ITEMS_NR)
4703                         nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4704                 else
4705                         nr = -1;
4706
4707                 trans = btrfs_join_transaction(root);
4708                 if (IS_ERR(trans)) {
4709                         ret = PTR_ERR(trans);
4710                         break;
4711                 }
4712                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4713                 btrfs_end_transaction(trans, root);
4714                 break;
4715         case FLUSH_DELALLOC:
4716         case FLUSH_DELALLOC_WAIT:
4717                 shrink_delalloc(root, num_bytes * 2, orig_bytes,
4718                                 state == FLUSH_DELALLOC_WAIT);
4719                 break;
4720         case ALLOC_CHUNK:
4721                 trans = btrfs_join_transaction(root);
4722                 if (IS_ERR(trans)) {
4723                         ret = PTR_ERR(trans);
4724                         break;
4725                 }
4726                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4727                                      btrfs_get_alloc_profile(root, 0),
4728                                      CHUNK_ALLOC_NO_FORCE);
4729                 btrfs_end_transaction(trans, root);
4730                 if (ret == -ENOSPC)
4731                         ret = 0;
4732                 break;
4733         case COMMIT_TRANS:
4734                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4735                 break;
4736         default:
4737                 ret = -ENOSPC;
4738                 break;
4739         }
4740
4741         return ret;
4742 }
4743
4744 static inline u64
4745 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4746                                  struct btrfs_space_info *space_info)
4747 {
4748         u64 used;
4749         u64 expected;
4750         u64 to_reclaim;
4751
4752         to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024,
4753                                 16 * 1024 * 1024);
4754         spin_lock(&space_info->lock);
4755         if (can_overcommit(root, space_info, to_reclaim,
4756                            BTRFS_RESERVE_FLUSH_ALL)) {
4757                 to_reclaim = 0;
4758                 goto out;
4759         }
4760
4761         used = space_info->bytes_used + space_info->bytes_reserved +
4762                space_info->bytes_pinned + space_info->bytes_readonly +
4763                space_info->bytes_may_use;
4764         if (can_overcommit(root, space_info, 1024 * 1024,
4765                            BTRFS_RESERVE_FLUSH_ALL))
4766                 expected = div_factor_fine(space_info->total_bytes, 95);
4767         else
4768                 expected = div_factor_fine(space_info->total_bytes, 90);
4769
4770         if (used > expected)
4771                 to_reclaim = used - expected;
4772         else
4773                 to_reclaim = 0;
4774         to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4775                                      space_info->bytes_reserved);
4776 out:
4777         spin_unlock(&space_info->lock);
4778
4779         return to_reclaim;
4780 }
4781
4782 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4783                                         struct btrfs_fs_info *fs_info, u64 used)
4784 {
4785         u64 thresh = div_factor_fine(space_info->total_bytes, 98);
4786
4787         /* If we're just plain full then async reclaim just slows us down. */
4788         if (space_info->bytes_used >= thresh)
4789                 return 0;
4790
4791         return (used >= thresh && !btrfs_fs_closing(fs_info) &&
4792                 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
4793 }
4794
4795 static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
4796                                        struct btrfs_fs_info *fs_info,
4797                                        int flush_state)
4798 {
4799         u64 used;
4800
4801         spin_lock(&space_info->lock);
4802         /*
4803          * We've run out of space and have not gained any via flush_space,
4804          * so don't bother doing async reclaim.
4805          */
4806         if (flush_state > COMMIT_TRANS && space_info->full) {
4807                 spin_unlock(&space_info->lock);
4808                 return 0;
4809         }
4810
4811         used = space_info->bytes_used + space_info->bytes_reserved +
4812                space_info->bytes_pinned + space_info->bytes_readonly +
4813                space_info->bytes_may_use;
4814         if (need_do_async_reclaim(space_info, fs_info, used)) {
4815                 spin_unlock(&space_info->lock);
4816                 return 1;
4817         }
4818         spin_unlock(&space_info->lock);
4819
4820         return 0;
4821 }
4822
4823 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4824 {
4825         struct btrfs_fs_info *fs_info;
4826         struct btrfs_space_info *space_info;
4827         u64 to_reclaim;
4828         int flush_state;
4829
4830         fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4831         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4832
4833         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4834                                                       space_info);
4835         if (!to_reclaim)
4836                 return;
4837
4838         flush_state = FLUSH_DELAYED_ITEMS_NR;
4839         do {
4840                 flush_space(fs_info->fs_root, space_info, to_reclaim,
4841                             to_reclaim, flush_state);
4842                 flush_state++;
4843                 if (!btrfs_need_do_async_reclaim(space_info, fs_info,
4844                                                  flush_state))
4845                         return;
4846         } while (flush_state < COMMIT_TRANS);
4847 }
4848
4849 void btrfs_init_async_reclaim_work(struct work_struct *work)
4850 {
4851         INIT_WORK(work, btrfs_async_reclaim_metadata_space);
4852 }
4853
4854 /**
4855  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4856  * @root - the root we're allocating for
4857  * @block_rsv - the block_rsv we're allocating for
4858  * @orig_bytes - the number of bytes we want
4859  * @flush - whether or not we can flush to make our reservation
4860  *
4861  * This will reserve orig_bytes number of bytes from the space info associated
4862  * with the block_rsv.  If there is not enough space it will make an attempt to
4863  * flush out space to make room.  It will do this by flushing delalloc if
4864  * possible or committing the transaction.  If flush is 0 then no attempts to
4865  * regain reservations will be made and this will fail if there is not enough
4866  * space already.
4867  */
4868 static int reserve_metadata_bytes(struct btrfs_root *root,
4869                                   struct btrfs_block_rsv *block_rsv,
4870                                   u64 orig_bytes,
4871                                   enum btrfs_reserve_flush_enum flush)
4872 {
4873         struct btrfs_space_info *space_info = block_rsv->space_info;
4874         u64 used;
4875         u64 num_bytes = orig_bytes;
4876         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4877         int ret = 0;
4878         bool flushing = false;
4879
4880 again:
4881         ret = 0;
4882         spin_lock(&space_info->lock);
4883         /*
4884          * We only want to wait if somebody other than us is flushing and we
4885          * are actually allowed to flush all things.
4886          */
4887         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4888                space_info->flush) {
4889                 spin_unlock(&space_info->lock);
4890                 /*
4891                  * If we have a trans handle we can't wait because the flusher
4892                  * may have to commit the transaction, which would mean we would
4893                  * deadlock since we are waiting for the flusher to finish, but
4894                  * hold the current transaction open.
4895                  */
4896                 if (current->journal_info)
4897                         return -EAGAIN;
4898                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4899                 /* Must have been killed, return */
4900                 if (ret)
4901                         return -EINTR;
4902
4903                 spin_lock(&space_info->lock);
4904         }
4905
4906         ret = -ENOSPC;
4907         used = space_info->bytes_used + space_info->bytes_reserved +
4908                 space_info->bytes_pinned + space_info->bytes_readonly +
4909                 space_info->bytes_may_use;
4910
4911         /*
4912          * The idea here is that if we've not already over-reserved the
4913          * block group, we can go ahead and save our reservation first and
4914          * then start flushing if we need to.  Otherwise, if we've already
4915          * overcommitted, let's start flushing first and then come back and
4916          * try to make our reservation.
4917          */
4918         if (used <= space_info->total_bytes) {
4919                 if (used + orig_bytes <= space_info->total_bytes) {
4920                         space_info->bytes_may_use += orig_bytes;
4921                         trace_btrfs_space_reservation(root->fs_info,
4922                                 "space_info", space_info->flags, orig_bytes, 1);
4923                         ret = 0;
4924                 } else {
4925                         /*
4926                          * Ok, set num_bytes to orig_bytes since we aren't
4927                          * overcommitted; this way we only try to reclaim what
4928                          * we need.
4929                          */
4930                         num_bytes = orig_bytes;
4931                 }
4932         } else {
4933                 /*
4934                  * Ok, we're overcommitted; set num_bytes to the overcommitted
4935                  * amount plus the amount of bytes that we need for this
4936                  * reservation.
4937                  */
4938                 num_bytes = used - space_info->total_bytes +
4939                         (orig_bytes * 2);
4940         }
4941
4942         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4943                 space_info->bytes_may_use += orig_bytes;
4944                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4945                                               space_info->flags, orig_bytes,
4946                                               1);
4947                 ret = 0;
4948         }
4949
4950         /*
4951          * Couldn't make our reservation, save our place so while we're trying
4952          * to reclaim space we can actually use it instead of somebody else
4953          * stealing it from us.
4954          *
4955          * We make the other tasks wait for the flush only when we can flush
4956          * all things.
4957          */
4958         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4959                 flushing = true;
4960                 space_info->flush = 1;
4961         } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
4962                 used += orig_bytes;
4963                 /*
4964                  * We will do the space reservation dance during log replay,
4965                  * which means we won't have fs_info->fs_root set, so don't do
4966                  * the async reclaim as we will panic.
4967                  */
4968                 if (!root->fs_info->log_root_recovering &&
4969                     need_do_async_reclaim(space_info, root->fs_info, used) &&
4970                     !work_busy(&root->fs_info->async_reclaim_work))
4971                         queue_work(system_unbound_wq,
4972                                    &root->fs_info->async_reclaim_work);
4973         }
4974         spin_unlock(&space_info->lock);
4975
4976         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4977                 goto out;
4978
4979         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4980                           flush_state);
4981         flush_state++;
4982
4983         /*
4984          * If we are FLUSH_LIMIT, we cannot flush delalloc, or a deadlock
4985          * could happen. So skip the delalloc flush.
4986          */
4987         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4988             (flush_state == FLUSH_DELALLOC ||
4989              flush_state == FLUSH_DELALLOC_WAIT))
4990                 flush_state = ALLOC_CHUNK;
4991
4992         if (!ret)
4993                 goto again;
4994         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4995                  flush_state < COMMIT_TRANS)
4996                 goto again;
4997         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4998                  flush_state <= COMMIT_TRANS)
4999                 goto again;
5000
5001 out:
5002         if (ret == -ENOSPC &&
5003             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
5004                 struct btrfs_block_rsv *global_rsv =
5005                         &root->fs_info->global_block_rsv;
5006
5007                 if (block_rsv != global_rsv &&
5008                     !block_rsv_use_bytes(global_rsv, orig_bytes))
5009                         ret = 0;
5010         }
5011         if (ret == -ENOSPC)
5012                 trace_btrfs_space_reservation(root->fs_info,
5013                                               "space_info:enospc",
5014                                               space_info->flags, orig_bytes, 1);
5015         if (flushing) {
5016                 spin_lock(&space_info->lock);
5017                 space_info->flush = 0;
5018                 wake_up_all(&space_info->wait);
5019                 spin_unlock(&space_info->lock);
5020         }
5021         return ret;
5022 }
5023
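/*
 * A condensed model of the retry loop in reserve_metadata_bytes(): try
 * the reservation, and on failure walk the flush_state ladder (delayed
 * items, delalloc, chunk allocation, transaction commit), retrying after
 * each step. try_reserve() and flush() are hypothetical callbacks, not
 * kernel functions, and the waiting/flushing-flag handshake is omitted.
 */
#include <stdint.h>

static int sketch_reserve_with_flushing(uint64_t bytes,
                                        int (*try_reserve)(uint64_t),
                                        void (*flush)(int state))
{
        int state;

        /* states 1..6 mirror FLUSH_DELAYED_ITEMS_NR..COMMIT_TRANS */
        for (state = 1; state <= 6; state++) {
                if (!try_reserve(bytes))
                        return 0;               /* reservation succeeded */
                flush(state);                   /* escalate and retry */
        }
        return try_reserve(bytes) ? -1 : 0;     /* -ENOSPC in the kernel */
}
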
5024 static struct btrfs_block_rsv *get_block_rsv(
5025                                         const struct btrfs_trans_handle *trans,
5026                                         const struct btrfs_root *root)
5027 {
5028         struct btrfs_block_rsv *block_rsv = NULL;
5029
5030         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
5031             (root == root->fs_info->csum_root && trans->adding_csums) ||
5032              (root == root->fs_info->uuid_root))
5033                 block_rsv = trans->block_rsv;
5034
5035         if (!block_rsv)
5036                 block_rsv = root->block_rsv;
5037
5038         if (!block_rsv)
5039                 block_rsv = &root->fs_info->empty_block_rsv;
5040
5041         return block_rsv;
5042 }
5043
5044 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
5045                                u64 num_bytes)
5046 {
5047         int ret = -ENOSPC;
5048         spin_lock(&block_rsv->lock);
5049         if (block_rsv->reserved >= num_bytes) {
5050                 block_rsv->reserved -= num_bytes;
5051                 if (block_rsv->reserved < block_rsv->size)
5052                         block_rsv->full = 0;
5053                 ret = 0;
5054         }
5055         spin_unlock(&block_rsv->lock);
5056         return ret;
5057 }
5058
5059 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
5060                                 u64 num_bytes, int update_size)
5061 {
5062         spin_lock(&block_rsv->lock);
5063         block_rsv->reserved += num_bytes;
5064         if (update_size)
5065                 block_rsv->size += num_bytes;
5066         else if (block_rsv->reserved >= block_rsv->size)
5067                 block_rsv->full = 1;
5068         spin_unlock(&block_rsv->lock);
5069 }
5070
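/*
 * A sketch of the block reserve bookkeeping above: "size" is the target
 * the reserve wants to hold, "reserved" is what it actually holds, and
 * "full" records whether reserved has caught up with size. The spinlock
 * that block_rsv_use_bytes()/block_rsv_add_bytes() take is omitted.
 */
#include <stdint.h>

struct sketch_rsv {
        uint64_t size;          /* target amount */
        uint64_t reserved;      /* amount actually held */
        int full;
};

static int sketch_rsv_use(struct sketch_rsv *r, uint64_t n)
{
        if (r->reserved < n)
                return -1;      /* -ENOSPC in the kernel */
        r->reserved -= n;
        if (r->reserved < r->size)
                r->full = 0;    /* dropped below target again */
        return 0;
}
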
5071 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
5072                              struct btrfs_block_rsv *dest, u64 num_bytes,
5073                              int min_factor)
5074 {
5075         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5076         u64 min_bytes;
5077
5078         if (global_rsv->space_info != dest->space_info)
5079                 return -ENOSPC;
5080
5081         spin_lock(&global_rsv->lock);
5082         min_bytes = div_factor(global_rsv->size, min_factor);
5083         if (global_rsv->reserved < min_bytes + num_bytes) {
5084                 spin_unlock(&global_rsv->lock);
5085                 return -ENOSPC;
5086         }
5087         global_rsv->reserved -= num_bytes;
5088         if (global_rsv->reserved < global_rsv->size)
5089                 global_rsv->full = 0;
5090         spin_unlock(&global_rsv->lock);
5091
5092         block_rsv_add_bytes(dest, num_bytes, 1);
5093         return 0;
5094 }
5095
5096 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
5097                                     struct btrfs_block_rsv *block_rsv,
5098                                     struct btrfs_block_rsv *dest, u64 num_bytes)
5099 {
5100         struct btrfs_space_info *space_info = block_rsv->space_info;
5101
5102         spin_lock(&block_rsv->lock);
5103         if (num_bytes == (u64)-1)
5104                 num_bytes = block_rsv->size;
5105         block_rsv->size -= num_bytes;
5106         if (block_rsv->reserved >= block_rsv->size) {
5107                 num_bytes = block_rsv->reserved - block_rsv->size;
5108                 block_rsv->reserved = block_rsv->size;
5109                 block_rsv->full = 1;
5110         } else {
5111                 num_bytes = 0;
5112         }
5113         spin_unlock(&block_rsv->lock);
5114
5115         if (num_bytes > 0) {
5116                 if (dest) {
5117                         spin_lock(&dest->lock);
5118                         if (!dest->full) {
5119                                 u64 bytes_to_add;
5120
5121                                 bytes_to_add = dest->size - dest->reserved;
5122                                 bytes_to_add = min(num_bytes, bytes_to_add);
5123                                 dest->reserved += bytes_to_add;
5124                                 if (dest->reserved >= dest->size)
5125                                         dest->full = 1;
5126                                 num_bytes -= bytes_to_add;
5127                         }
5128                         spin_unlock(&dest->lock);
5129                 }
5130                 if (num_bytes) {
5131                         spin_lock(&space_info->lock);
5132                         space_info->bytes_may_use -= num_bytes;
5133                         trace_btrfs_space_reservation(fs_info, "space_info",
5134                                         space_info->flags, num_bytes, 0);
5135                         spin_unlock(&space_info->lock);
5136                 }
5137         }
5138 }
5139
5140 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
5141                                    struct btrfs_block_rsv *dst, u64 num_bytes)
5142 {
5143         int ret;
5144
5145         ret = block_rsv_use_bytes(src, num_bytes);
5146         if (ret)
5147                 return ret;
5148
5149         block_rsv_add_bytes(dst, num_bytes, 1);
5150         return 0;
5151 }
5152
5153 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
5154 {
5155         memset(rsv, 0, sizeof(*rsv));
5156         spin_lock_init(&rsv->lock);
5157         rsv->type = type;
5158 }
5159
5160 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
5161                                               unsigned short type)
5162 {
5163         struct btrfs_block_rsv *block_rsv;
5164         struct btrfs_fs_info *fs_info = root->fs_info;
5165
5166         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
5167         if (!block_rsv)
5168                 return NULL;
5169
5170         btrfs_init_block_rsv(block_rsv, type);
5171         block_rsv->space_info = __find_space_info(fs_info,
5172                                                   BTRFS_BLOCK_GROUP_METADATA);
5173         return block_rsv;
5174 }
5175
5176 void btrfs_free_block_rsv(struct btrfs_root *root,
5177                           struct btrfs_block_rsv *rsv)
5178 {
5179         if (!rsv)
5180                 return;
5181         btrfs_block_rsv_release(root, rsv, (u64)-1);
5182         kfree(rsv);
5183 }
5184
5185 void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
5186 {
5187         kfree(rsv);
5188 }
5189
5190 int btrfs_block_rsv_add(struct btrfs_root *root,
5191                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
5192                         enum btrfs_reserve_flush_enum flush)
5193 {
5194         int ret;
5195
5196         if (num_bytes == 0)
5197                 return 0;
5198
5199         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5200         if (!ret) {
5201                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
5202                 return 0;
5203         }
5204
5205         return ret;
5206 }
5207
5208 int btrfs_block_rsv_check(struct btrfs_root *root,
5209                           struct btrfs_block_rsv *block_rsv, int min_factor)
5210 {
5211         u64 num_bytes = 0;
5212         int ret = -ENOSPC;
5213
5214         if (!block_rsv)
5215                 return 0;
5216
5217         spin_lock(&block_rsv->lock);
5218         num_bytes = div_factor(block_rsv->size, min_factor);
5219         if (block_rsv->reserved >= num_bytes)
5220                 ret = 0;
5221         spin_unlock(&block_rsv->lock);
5222
5223         return ret;
5224 }
5225
5226 int btrfs_block_rsv_refill(struct btrfs_root *root,
5227                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
5228                            enum btrfs_reserve_flush_enum flush)
5229 {
5230         u64 num_bytes = 0;
5231         int ret = -ENOSPC;
5232
5233         if (!block_rsv)
5234                 return 0;
5235
5236         spin_lock(&block_rsv->lock);
5237         num_bytes = min_reserved;
5238         if (block_rsv->reserved >= num_bytes)
5239                 ret = 0;
5240         else
5241                 num_bytes -= block_rsv->reserved;
5242         spin_unlock(&block_rsv->lock);
5243
5244         if (!ret)
5245                 return 0;
5246
5247         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5248         if (!ret) {
5249                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
5250                 return 0;
5251         }
5252
5253         return ret;
5254 }
5255
5256 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
5257                             struct btrfs_block_rsv *dst_rsv,
5258                             u64 num_bytes)
5259 {
5260         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5261 }
5262
5263 void btrfs_block_rsv_release(struct btrfs_root *root,
5264                              struct btrfs_block_rsv *block_rsv,
5265                              u64 num_bytes)
5266 {
5267         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5268         if (global_rsv == block_rsv ||
5269             block_rsv->space_info != global_rsv->space_info)
5270                 global_rsv = NULL;
5271         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
5272                                 num_bytes);
5273 }
5274
5275 /*
5276  * Helper to calculate the size of the global block reservation.
5277  * The desired value is the sum of the space used by the extent tree,
5278  * the checksum tree and the root tree.
5279  */
5280 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
5281 {
5282         struct btrfs_space_info *sinfo;
5283         u64 num_bytes;
5284         u64 meta_used;
5285         u64 data_used;
5286         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
5287
5288         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
5289         spin_lock(&sinfo->lock);
5290         data_used = sinfo->bytes_used;
5291         spin_unlock(&sinfo->lock);
5292
5293         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5294         spin_lock(&sinfo->lock);
5295         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
5296                 data_used = 0;
5297         meta_used = sinfo->bytes_used;
5298         spin_unlock(&sinfo->lock);
5299
5300         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
5301                     csum_size * 2;
5302         num_bytes += div_u64(data_used + meta_used, 50);
5303
5304         if (num_bytes * 3 > meta_used)
5305                 num_bytes = div_u64(meta_used, 3);
5306
5307         return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
5308 }
5309
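/*
 * A worked model of calc_global_metadata_size() above: the checksum term
 * is (data_used / blocksize) * csum_size * 2, a flat ~2% of all used
 * space is added, and the result is clamped to a third of the metadata
 * in use. Rounding to the nodesize multiple is left out for brevity.
 */
#include <stdint.h>

static uint64_t sketch_global_rsv_size(uint64_t data_used, uint64_t meta_used,
                                       uint64_t blocksize, uint64_t csum_size)
{
        uint64_t n = (data_used / blocksize) * csum_size * 2;

        n += (data_used + meta_used) / 50;      /* ~2% of everything in use */
        if (n * 3 > meta_used)
                n = meta_used / 3;              /* cap at 1/3 of metadata used */
        return n;
}
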
5310 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
5311 {
5312         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
5313         struct btrfs_space_info *sinfo = block_rsv->space_info;
5314         u64 num_bytes;
5315
5316         num_bytes = calc_global_metadata_size(fs_info);
5317
5318         spin_lock(&sinfo->lock);
5319         spin_lock(&block_rsv->lock);
5320
5321         block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
5322
5323         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
5324                     sinfo->bytes_reserved + sinfo->bytes_readonly +
5325                     sinfo->bytes_may_use;
5326
5327         if (sinfo->total_bytes > num_bytes) {
5328                 num_bytes = sinfo->total_bytes - num_bytes;
5329                 block_rsv->reserved += num_bytes;
5330                 sinfo->bytes_may_use += num_bytes;
5331                 trace_btrfs_space_reservation(fs_info, "space_info",
5332                                       sinfo->flags, num_bytes, 1);
5333         }
5334
5335         if (block_rsv->reserved >= block_rsv->size) {
5336                 num_bytes = block_rsv->reserved - block_rsv->size;
5337                 sinfo->bytes_may_use -= num_bytes;
5338                 trace_btrfs_space_reservation(fs_info, "space_info",
5339                                       sinfo->flags, num_bytes, 0);
5340                 block_rsv->reserved = block_rsv->size;
5341                 block_rsv->full = 1;
5342         }
5343
5344         spin_unlock(&block_rsv->lock);
5345         spin_unlock(&sinfo->lock);
5346 }
5347
5348 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
5349 {
5350         struct btrfs_space_info *space_info;
5351
5352         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
5353         fs_info->chunk_block_rsv.space_info = space_info;
5354
5355         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5356         fs_info->global_block_rsv.space_info = space_info;
5357         fs_info->delalloc_block_rsv.space_info = space_info;
5358         fs_info->trans_block_rsv.space_info = space_info;
5359         fs_info->empty_block_rsv.space_info = space_info;
5360         fs_info->delayed_block_rsv.space_info = space_info;
5361
5362         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
5363         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
5364         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
5365         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
5366         if (fs_info->quota_root)
5367                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
5368         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
5369
5370         update_global_block_rsv(fs_info);
5371 }
5372
5373 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
5374 {
5375         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
5376                                 (u64)-1);
5377         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
5378         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
5379         WARN_ON(fs_info->trans_block_rsv.size > 0);
5380         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
5381         WARN_ON(fs_info->chunk_block_rsv.size > 0);
5382         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
5383         WARN_ON(fs_info->delayed_block_rsv.size > 0);
5384         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
5385 }
5386
5387 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
5388                                   struct btrfs_root *root)
5389 {
5390         if (!trans->block_rsv)
5391                 return;
5392
5393         if (!trans->bytes_reserved)
5394                 return;
5395
5396         trace_btrfs_space_reservation(root->fs_info, "transaction",
5397                                       trans->transid, trans->bytes_reserved, 0);
5398         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
5399         trans->bytes_reserved = 0;
5400 }
5401
5402 /*
5403  * To be called after all the new block groups attached to the transaction
5404  * handle have been created (btrfs_create_pending_block_groups()).
5405  */
5406 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
5407 {
5408         struct btrfs_fs_info *fs_info = trans->root->fs_info;
5409
5410         if (!trans->chunk_bytes_reserved)
5411                 return;
5412
5413         WARN_ON_ONCE(!list_empty(&trans->new_bgs));
5414
5415         block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
5416                                 trans->chunk_bytes_reserved);
5417         trans->chunk_bytes_reserved = 0;
5418 }
5419
5420 /* Can only return 0 or -ENOSPC */
5421 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
5422                                   struct inode *inode)
5423 {
5424         struct btrfs_root *root = BTRFS_I(inode)->root;
5425         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
5426         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
5427
5428         /*
5429          * We need to hold space in order to delete our orphan item once we've
5430          * added it, so this takes the reservation now and releases it only
5431          * when we are truly done with the orphan item.
5432          */
5433         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5434         trace_btrfs_space_reservation(root->fs_info, "orphan",
5435                                       btrfs_ino(inode), num_bytes, 1);
5436         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5437 }
5438
5439 void btrfs_orphan_release_metadata(struct inode *inode)
5440 {
5441         struct btrfs_root *root = BTRFS_I(inode)->root;
5442         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5443         trace_btrfs_space_reservation(root->fs_info, "orphan",
5444                                       btrfs_ino(inode), num_bytes, 0);
5445         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
5446 }
5447
5448 /*
5449  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
5450  * root: the root of the parent directory
5451  * rsv: block reservation
5452  * items: the number of items that we need to reserve space for
5453  * qgroup_reserved: used to return the reserved size in qgroup
5454  *
5455  * This function is used to reserve the space for snapshot/subvolume
5456  * creation and deletion. Those operations are different from the
5457  * common file/directory operations: they change two fs/file trees
5458  * and the root tree, and the number of items that the qgroup reserves
5459  * is different from the free space reservation. So we can not use
5460  * the space reservation mechanism in start_transaction().
5461  */
5462 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
5463                                      struct btrfs_block_rsv *rsv,
5464                                      int items,
5465                                      u64 *qgroup_reserved,
5466                                      bool use_global_rsv)
5467 {
5468         u64 num_bytes;
5469         int ret;
5470         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5471
5472         if (root->fs_info->quota_enabled) {
5473                 /* One for parent inode, two for dir entries */
5474                 num_bytes = 3 * root->nodesize;
5475                 ret = btrfs_qgroup_reserve_meta(root, num_bytes);
5476                 if (ret)
5477                         return ret;
5478         } else {
5479                 num_bytes = 0;
5480         }
5481
5482         *qgroup_reserved = num_bytes;
5483
5484         num_bytes = btrfs_calc_trans_metadata_size(root, items);
5485         rsv->space_info = __find_space_info(root->fs_info,
5486                                             BTRFS_BLOCK_GROUP_METADATA);
5487         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
5488                                   BTRFS_RESERVE_FLUSH_ALL);
5489
5490         if (ret == -ENOSPC && use_global_rsv)
5491                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
5492
5493         if (ret && *qgroup_reserved)
5494                 btrfs_qgroup_free_meta(root, *qgroup_reserved);
5495
5496         return ret;
5497 }
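
/*
 * A rough worked example with assumed numbers: creating a snapshot with
 * quota enabled on a 16KiB-nodesize fs reserves 3 * 16KiB = 48KiB of
 * qgroup metadata up front (one parent inode, two dir entries).  If
 * btrfs_calc_trans_metadata_size() charges 2 * BTRFS_MAX_LEVEL nodes
 * per item (16KiB * 16 = 256KiB each), items == 8 asks the space info
 * for 8 * 256KiB = 2MiB, falls back to the global reserve on -ENOSPC
 * when use_global_rsv is set, and frees the qgroup reservation again if
 * the block reservation ultimately fails.
 */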
5498
5499 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
5500                                       struct btrfs_block_rsv *rsv,
5501                                       u64 qgroup_reserved)
5502 {
5503         btrfs_block_rsv_release(root, rsv, (u64)-1);
5504 }
5505
5506 /**
5507  * drop_outstanding_extent - drop an outstanding extent
5508  * @inode: the inode we're dropping the extent for
5509  * @num_bytes: the number of bytes we're releasing.
5510  *
5511  * This is called when we are freeing up an outstanding extent, either called
5512  * after an error or after an extent is written.  This will return the number of
5513  * reserved extents that need to be freed.  This must be called with
5514  * BTRFS_I(inode)->lock held.
5515  */
5516 static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
5517 {
5518         unsigned drop_inode_space = 0;
5519         unsigned dropped_extents = 0;
5520         unsigned num_extents = 0;
5521
5522         num_extents = (unsigned)div64_u64(num_bytes +
5523                                           BTRFS_MAX_EXTENT_SIZE - 1,
5524                                           BTRFS_MAX_EXTENT_SIZE);
5525         ASSERT(num_extents);
5526         ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
5527         BTRFS_I(inode)->outstanding_extents -= num_extents;
5528
5529         if (BTRFS_I(inode)->outstanding_extents == 0 &&
5530             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5531                                &BTRFS_I(inode)->runtime_flags))
5532                 drop_inode_space = 1;
5533
5534         /*
5535          * If we have at least as many outstanding extents as we have
5536          * reserved then we need to leave the reserved extents count alone.
5537          */
5538         if (BTRFS_I(inode)->outstanding_extents >=
5539             BTRFS_I(inode)->reserved_extents)
5540                 return drop_inode_space;
5541
5542         dropped_extents = BTRFS_I(inode)->reserved_extents -
5543                 BTRFS_I(inode)->outstanding_extents;
5544         BTRFS_I(inode)->reserved_extents -= dropped_extents;
5545         return dropped_extents + drop_inode_space;
5546 }
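
/*
 * Worked example, assuming BTRFS_MAX_EXTENT_SIZE is 128MiB: freeing a
 * 384MiB range drops DIV_ROUND_UP(384MiB, 128MiB) == 3 outstanding
 * extents.  If that leaves outstanding_extents at 2 while
 * reserved_extents is 4, we hand back 4 - 2 = 2 reserved extents; the
 * extra inode-update reservation is only handed back once
 * outstanding_extents reaches zero.
 */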
5547
5548 /**
5549  * calc_csum_metadata_size - return the amount of metadata space that must be
5550  *      reserved/free'd for the given bytes.
5551  * @inode: the inode we're manipulating
5552  * @num_bytes: the number of bytes in question
5553  * @reserve: 1 if we are reserving space, 0 if we are freeing space
5554  *
5555  * This adjusts the number of csum_bytes in the inode and then returns the
5556  * correct amount of metadata that must either be reserved or freed.  We
5557  * calculate how many checksums we can fit into one leaf and then divide the
5558  * number of bytes that will need to be checksummed by this value to figure out
5559  * how many checksums will be required.  If we are adding bytes then the number
5560  * may go up and we will return the number of additional bytes that must be
5561  * reserved.  If it is going down we will return the number of bytes that must
5562  * be freed.
5563  *
5564  * This must be called with BTRFS_I(inode)->lock held.
5565  */
5566 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
5567                                    int reserve)
5568 {
5569         struct btrfs_root *root = BTRFS_I(inode)->root;
5570         u64 old_csums, num_csums;
5571
5572         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
5573             BTRFS_I(inode)->csum_bytes == 0)
5574                 return 0;
5575
5576         old_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5577         if (reserve)
5578                 BTRFS_I(inode)->csum_bytes += num_bytes;
5579         else
5580                 BTRFS_I(inode)->csum_bytes -= num_bytes;
5581         num_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5582
5583         /* No change, no need to reserve more */
5584         if (old_csums == num_csums)
5585                 return 0;
5586
5587         if (reserve)
5588                 return btrfs_calc_trans_metadata_size(root,
5589                                                       num_csums - old_csums);
5590
5591         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
5592 }
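
/*
 * Illustrative numbers (assumed, not derived from this file): say
 * btrfs_csum_bytes_to_leaves() works out to roughly one 16KiB leaf per
 * 16MiB of checksummed data.  Reserving 24MiB against an inode whose
 * csum_bytes is 8MiB moves it to 32MiB: old_csums == 1 leaf,
 * num_csums == 2 leaves, so one extra leaf's worth of metadata,
 * btrfs_calc_trans_metadata_size(root, 1), must be reserved; freeing
 * the same range later returns exactly that amount.
 */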
5593
5594 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5595 {
5596         struct btrfs_root *root = BTRFS_I(inode)->root;
5597         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
5598         u64 to_reserve = 0;
5599         u64 csum_bytes;
5600         unsigned nr_extents = 0;
5601         int extra_reserve = 0;
5602         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5603         int ret = 0;
5604         bool delalloc_lock = true;
5605         u64 to_free = 0;
5606         unsigned dropped;
5607
5608         /* If we are a free space inode we need to not flush since we will be in
5609          * the middle of a transaction commit.  We also don't need the delalloc
5610          * mutex since we won't race with anybody.  We need this mostly to make
5611          * lockdep shut its filthy mouth.
5612          */
5613         if (btrfs_is_free_space_inode(inode)) {
5614                 flush = BTRFS_RESERVE_NO_FLUSH;
5615                 delalloc_lock = false;
5616         }
5617
5618         if (flush != BTRFS_RESERVE_NO_FLUSH &&
5619             btrfs_transaction_in_commit(root->fs_info))
5620                 schedule_timeout(1);
5621
5622         if (delalloc_lock)
5623                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5624
5625         num_bytes = ALIGN(num_bytes, root->sectorsize);
5626
5627         spin_lock(&BTRFS_I(inode)->lock);
5628         nr_extents = (unsigned)div64_u64(num_bytes +
5629                                          BTRFS_MAX_EXTENT_SIZE - 1,
5630                                          BTRFS_MAX_EXTENT_SIZE);
5631         BTRFS_I(inode)->outstanding_extents += nr_extents;
5632         nr_extents = 0;
5633
5634         if (BTRFS_I(inode)->outstanding_extents >
5635             BTRFS_I(inode)->reserved_extents)
5636                 nr_extents = BTRFS_I(inode)->outstanding_extents -
5637                         BTRFS_I(inode)->reserved_extents;
5638
5639         /*
5640          * Add an item to reserve for updating the inode when we complete the
5641          * delalloc io.
5642          */
5643         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5644                       &BTRFS_I(inode)->runtime_flags)) {
5645                 nr_extents++;
5646                 extra_reserve = 1;
5647         }
5648
5649         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
5650         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5651         csum_bytes = BTRFS_I(inode)->csum_bytes;
5652         spin_unlock(&BTRFS_I(inode)->lock);
5653
5654         if (root->fs_info->quota_enabled) {
5655                 ret = btrfs_qgroup_reserve_meta(root,
5656                                 nr_extents * root->nodesize);
5657                 if (ret)
5658                         goto out_fail;
5659         }
5660
5661         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
5662         if (unlikely(ret)) {
5663                 btrfs_qgroup_free_meta(root, nr_extents * root->nodesize);
5664                 goto out_fail;
5665         }
5666
5667         spin_lock(&BTRFS_I(inode)->lock);
5668         if (extra_reserve) {
5669                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5670                         &BTRFS_I(inode)->runtime_flags);
5671                 nr_extents--;
5672         }
5673         BTRFS_I(inode)->reserved_extents += nr_extents;
5674         spin_unlock(&BTRFS_I(inode)->lock);
5675
5676         if (delalloc_lock)
5677                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5678
5679         if (to_reserve)
5680                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5681                                               btrfs_ino(inode), to_reserve, 1);
5682         block_rsv_add_bytes(block_rsv, to_reserve, 1);
5683
5684         return 0;
5685
5686 out_fail:
5687         spin_lock(&BTRFS_I(inode)->lock);
5688         dropped = drop_outstanding_extent(inode, num_bytes);
5689         /*
5690          * If the inode's csum_bytes is the same as the original
5691          * csum_bytes then we know we haven't raced with any free()ers
5692          * so we can just reduce our inode's csum bytes and carry on.
5693          */
5694         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5695                 calc_csum_metadata_size(inode, num_bytes, 0);
5696         } else {
5697                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5698                 u64 bytes;
5699
5700                 /*
5701                  * This is tricky, but first we need to figure out how much we
5702                  * free'd from any free-ers that occurred during this
5703                  * reservation, so we reset ->csum_bytes to the csum_bytes
5704                  * before we dropped our lock, and then call the free for the
5705                  * number of bytes that were freed while we were trying our
5706                  * reservation.
5707                  */
5708                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5709                 BTRFS_I(inode)->csum_bytes = csum_bytes;
5710                 to_free = calc_csum_metadata_size(inode, bytes, 0);
5711
5713                 /*
5714                  * Now we need to see how much we would have freed had we not
5715                  * been making this reservation and our ->csum_bytes were not
5716                  * artificially inflated.
5717                  */
5718                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5719                 bytes = csum_bytes - orig_csum_bytes;
5720                 bytes = calc_csum_metadata_size(inode, bytes, 0);
5721
5722                 /*
5723                  * Now reset ->csum_bytes to what it should be.  If bytes is
5724                  * more than to_free then we would have free'd more space had we
5725                  * not had an artificially high ->csum_bytes, so we need to free
5726                  * the remainder.  If bytes is the same or less then we don't
5727                  * need to do anything, the other free-ers did the correct
5728                  * thing.
5729                  */
5730                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5731                 if (bytes > to_free)
5732                         to_free = bytes - to_free;
5733                 else
5734                         to_free = 0;
5735         }
5736         spin_unlock(&BTRFS_I(inode)->lock);
5737         if (dropped)
5738                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5739
5740         if (to_free) {
5741                 btrfs_block_rsv_release(root, block_rsv, to_free);
5742                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5743                                               btrfs_ino(inode), to_free, 0);
5744         }
5745         if (delalloc_lock)
5746                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5747         return ret;
5748 }
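
/*
 * Happy-path sketch with assumed geometry (4KiB sectors, 16KiB nodes,
 * 128MiB BTRFS_MAX_EXTENT_SIZE): a 1MiB buffered write aligns to 1MiB,
 * counts as one outstanding extent, and picks up a second item for the
 * inode update if BTRFS_INODE_DELALLOC_META_RESERVED isn't set yet.
 * With btrfs_calc_trans_metadata_size() charging 2 * BTRFS_MAX_LEVEL
 * nodes per item, to_reserve is 2 * 256KiB = 512KiB plus whatever
 * calc_csum_metadata_size() adds for the 1MiB of new csum_bytes.
 */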
5749
5750 /**
5751  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5752  * @inode: the inode to release the reservation for
5753  * @num_bytes: the number of bytes we're releasing
5754  *
5755  * This will release the metadata reservation for an inode.  This can be called
5756  * once we complete IO for a given set of bytes to release their metadata
5757  * reservations.
5758  */
5759 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5760 {
5761         struct btrfs_root *root = BTRFS_I(inode)->root;
5762         u64 to_free = 0;
5763         unsigned dropped;
5764
5765         num_bytes = ALIGN(num_bytes, root->sectorsize);
5766         spin_lock(&BTRFS_I(inode)->lock);
5767         dropped = drop_outstanding_extent(inode, num_bytes);
5768
5769         if (num_bytes)
5770                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5771         spin_unlock(&BTRFS_I(inode)->lock);
5772         if (dropped > 0)
5773                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5774
5775         if (btrfs_test_is_dummy_root(root))
5776                 return;
5777
5778         trace_btrfs_space_reservation(root->fs_info, "delalloc",
5779                                       btrfs_ino(inode), to_free, 0);
5780
5781         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5782                                 to_free);
5783 }
5784
5785 /**
5786  * btrfs_delalloc_reserve_space - reserve data and metadata space for
5787  * delalloc
5788  * @inode: inode we're writing to
5789  * @start: start range we are writing to
5790  * @len: the length of the range we are writing
5791  *
5792  * TODO: This function will finally replace old btrfs_delalloc_reserve_space()
5793  *
5794  * This will do the following things
5795  * This will do the following things:
5796  * o reserve space in data space info for num bytes
5797  *   and reserve precise corresponding qgroup space
5798  *   (Done in check_data_free_space)
5799  *
5800  * o reserve metadata space, based on the number of outstanding
5801  *   extents and how many csums will be needed;
5802  *   also reserve metadata space in a per-root over-reserve method.
5803  * o add to the inode's delalloc_bytes
5804  * o add it to the fs_info's delalloc inodes list.
5805  *   (Above 3 all done in delalloc_reserve_metadata)
5806  *
5807  * Return 0 for success
5808  * Return <0 for error (-ENOSPC or -EDQUOT)
5809  */
5810 int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len)
5811 {
5812         int ret;
5813
5814         ret = btrfs_check_data_free_space(inode, start, len);
5815         if (ret < 0)
5816                 return ret;
5817         ret = btrfs_delalloc_reserve_metadata(inode, len);
5818         if (ret < 0)
5819                 btrfs_free_reserved_data_space(inode, start, len);
5820         return ret;
5821 }
5822
5823 /**
5824  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5825  * @inode: inode we're releasing space for
5826  * @start: start position of the space already reserved
5827  * @len: the len of the space already reserved
5828  *
5829  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5830  * called in the case that we don't need the metadata AND data reservations
5831  * anymore, for example if there is an error or we insert an inline extent.
5832  *
5833  * This function will release the metadata space that was not used and will
5834  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5835  * list if there are no delalloc bytes left.
5836  * Also it will handle the qgroup reserved space.
5837  */
5838 void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len)
5839 {
5840         btrfs_delalloc_release_metadata(inode, len);
5841         btrfs_free_reserved_data_space(inode, start, len);
5842 }
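
/*
 * Illustrative pairing in a write path; the copy step below is a
 * hypothetical stand-in, not a kernel API:
 *
 *     ret = btrfs_delalloc_reserve_space(inode, pos, count);
 *     if (ret)
 *             return ret;
 *     ret = copy_and_mark_delalloc(inode, pos, count);
 *     if (ret)
 *             btrfs_delalloc_release_space(inode, pos, count);
 *
 * On success the data half is consumed as the delalloc range is written
 * out and the metadata half is dropped through
 * btrfs_delalloc_release_metadata() once the IO completes.
 */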
5843
5844 static int update_block_group(struct btrfs_trans_handle *trans,
5845                               struct btrfs_root *root, u64 bytenr,
5846                               u64 num_bytes, int alloc)
5847 {
5848         struct btrfs_block_group_cache *cache = NULL;
5849         struct btrfs_fs_info *info = root->fs_info;
5850         u64 total = num_bytes;
5851         u64 old_val;
5852         u64 byte_in_group;
5853         int factor;
5854
5855         /* block accounting for super block */
5856         spin_lock(&info->delalloc_root_lock);
5857         old_val = btrfs_super_bytes_used(info->super_copy);
5858         if (alloc)
5859                 old_val += num_bytes;
5860         else
5861                 old_val -= num_bytes;
5862         btrfs_set_super_bytes_used(info->super_copy, old_val);
5863         spin_unlock(&info->delalloc_root_lock);
5864
5865         while (total) {
5866                 cache = btrfs_lookup_block_group(info, bytenr);
5867                 if (!cache)
5868                         return -ENOENT;
5869                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5870                                     BTRFS_BLOCK_GROUP_RAID1 |
5871                                     BTRFS_BLOCK_GROUP_RAID10))
5872                         factor = 2;
5873                 else
5874                         factor = 1;
5875                 /*
5876                  * If this block group has free space cache written out, we
5877                  * need to make sure to load it if we are removing space.  This
5878                  * is because we need the unpinning stage to actually add the
5879                  * space back to the block group, otherwise we will leak space.
5880                  */
5881                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5882                         cache_block_group(cache, 1);
5883
5884                 byte_in_group = bytenr - cache->key.objectid;
5885                 WARN_ON(byte_in_group > cache->key.offset);
5886
5887                 spin_lock(&cache->space_info->lock);
5888                 spin_lock(&cache->lock);
5889
5890                 if (btrfs_test_opt(root, SPACE_CACHE) &&
5891                     cache->disk_cache_state < BTRFS_DC_CLEAR)
5892                         cache->disk_cache_state = BTRFS_DC_CLEAR;
5893
5894                 old_val = btrfs_block_group_used(&cache->item);
5895                 num_bytes = min(total, cache->key.offset - byte_in_group);
5896                 if (alloc) {
5897                         old_val += num_bytes;
5898                         btrfs_set_block_group_used(&cache->item, old_val);
5899                         cache->reserved -= num_bytes;
5900                         cache->space_info->bytes_reserved -= num_bytes;
5901                         cache->space_info->bytes_used += num_bytes;
5902                         cache->space_info->disk_used += num_bytes * factor;
5903                         spin_unlock(&cache->lock);
5904                         spin_unlock(&cache->space_info->lock);
5905                 } else {
5906                         old_val -= num_bytes;
5907                         btrfs_set_block_group_used(&cache->item, old_val);
5908                         cache->pinned += num_bytes;
5909                         cache->space_info->bytes_pinned += num_bytes;
5910                         cache->space_info->bytes_used -= num_bytes;
5911                         cache->space_info->disk_used -= num_bytes * factor;
5912                         spin_unlock(&cache->lock);
5913                         spin_unlock(&cache->space_info->lock);
5914
5915                         set_extent_dirty(info->pinned_extents,
5916                                          bytenr, bytenr + num_bytes - 1,
5917                                          GFP_NOFS | __GFP_NOFAIL);
5918                         /*
5919                          * No longer have used bytes in this block group, queue
5920                          * it for deletion.
5921                          */
5922                         if (old_val == 0) {
5923                                 spin_lock(&info->unused_bgs_lock);
5924                                 if (list_empty(&cache->bg_list)) {
5925                                         btrfs_get_block_group(cache);
5926                                         list_add_tail(&cache->bg_list,
5927                                                       &info->unused_bgs);
5928                                 }
5929                                 spin_unlock(&info->unused_bgs_lock);
5930                         }
5931                 }
5932
5933                 spin_lock(&trans->transaction->dirty_bgs_lock);
5934                 if (list_empty(&cache->dirty_list)) {
5935                         list_add_tail(&cache->dirty_list,
5936                                       &trans->transaction->dirty_bgs);
5937                         trans->transaction->num_dirty_bgs++;
5938                         btrfs_get_block_group(cache);
5939                 }
5940                 spin_unlock(&trans->transaction->dirty_bgs_lock);
5941
5942                 btrfs_put_block_group(cache);
5943                 total -= num_bytes;
5944                 bytenr += num_bytes;
5945         }
5946         return 0;
5947 }
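
/*
 * Example of the replication factor above, with assumed numbers:
 * allocating 1MiB from a RAID1 (or DUP/RAID10) block group bumps
 * bytes_used by 1MiB but disk_used by 2MiB, since every byte lives in
 * two copies; for profiles with factor 1 the two counters move in
 * lockstep.
 */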
5948
5949 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5950 {
5951         struct btrfs_block_group_cache *cache;
5952         u64 bytenr;
5953
5954         spin_lock(&root->fs_info->block_group_cache_lock);
5955         bytenr = root->fs_info->first_logical_byte;
5956         spin_unlock(&root->fs_info->block_group_cache_lock);
5957
5958         if (bytenr < (u64)-1)
5959                 return bytenr;
5960
5961         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5962         if (!cache)
5963                 return 0;
5964
5965         bytenr = cache->key.objectid;
5966         btrfs_put_block_group(cache);
5967
5968         return bytenr;
5969 }
5970
5971 static int pin_down_extent(struct btrfs_root *root,
5972                            struct btrfs_block_group_cache *cache,
5973                            u64 bytenr, u64 num_bytes, int reserved)
5974 {
5975         spin_lock(&cache->space_info->lock);
5976         spin_lock(&cache->lock);
5977         cache->pinned += num_bytes;
5978         cache->space_info->bytes_pinned += num_bytes;
5979         if (reserved) {
5980                 cache->reserved -= num_bytes;
5981                 cache->space_info->bytes_reserved -= num_bytes;
5982         }
5983         spin_unlock(&cache->lock);
5984         spin_unlock(&cache->space_info->lock);
5985
5986         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5987                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5988         if (reserved)
5989                 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
5990         return 0;
5991 }
5992
5993 /*
5994  * this function must be called within a transaction
5995  */
5996 int btrfs_pin_extent(struct btrfs_root *root,
5997                      u64 bytenr, u64 num_bytes, int reserved)
5998 {
5999         struct btrfs_block_group_cache *cache;
6000
6001         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
6002         BUG_ON(!cache); /* Logic error */
6003
6004         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
6005
6006         btrfs_put_block_group(cache);
6007         return 0;
6008 }
6009
6010 /*
6011  * this function must be called within transaction
6012  * this function must be called within a transaction
6013 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
6014                                     u64 bytenr, u64 num_bytes)
6015 {
6016         struct btrfs_block_group_cache *cache;
6017         int ret;
6018
6019         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
6020         if (!cache)
6021                 return -EINVAL;
6022
6023         /*
6024          * pull in the free space cache (if any) so that our pin
6025          * removes the free space from the cache.  We have load_only set
6026          * to one because the slow code to read in the free extents does check
6027          * the pinned extents.
6028          */
6029         cache_block_group(cache, 1);
6030
6031         pin_down_extent(root, cache, bytenr, num_bytes, 0);
6032
6033         /* remove us from the free space cache (if we're there at all) */
6034         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
6035         btrfs_put_block_group(cache);
6036         return ret;
6037 }
6038
6039 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
6040 {
6041         int ret;
6042         struct btrfs_block_group_cache *block_group;
6043         struct btrfs_caching_control *caching_ctl;
6044
6045         block_group = btrfs_lookup_block_group(root->fs_info, start);
6046         if (!block_group)
6047                 return -EINVAL;
6048
6049         cache_block_group(block_group, 0);
6050         caching_ctl = get_caching_control(block_group);
6051
6052         if (!caching_ctl) {
6053                 /* Logic error */
6054                 BUG_ON(!block_group_cache_done(block_group));
6055                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
6056         } else {
6057                 mutex_lock(&caching_ctl->mutex);
6058
6059                 if (start >= caching_ctl->progress) {
6060                         ret = add_excluded_extent(root, start, num_bytes);
6061                 } else if (start + num_bytes <= caching_ctl->progress) {
6062                         ret = btrfs_remove_free_space(block_group,
6063                                                       start, num_bytes);
6064                 } else {
6065                         num_bytes = caching_ctl->progress - start;
6066                         ret = btrfs_remove_free_space(block_group,
6067                                                       start, num_bytes);
6068                         if (ret)
6069                                 goto out_lock;
6070
6071                         num_bytes = (start + num_bytes) -
6072                                 caching_ctl->progress;
6073                         start = caching_ctl->progress;
6074                         ret = add_excluded_extent(root, start, num_bytes);
6075                 }
6076 out_lock:
6077                 mutex_unlock(&caching_ctl->mutex);
6078                 put_caching_control(caching_ctl);
6079         }
6080         btrfs_put_block_group(block_group);
6081         return ret;
6082 }
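
/*
 * Worked example of the three cases above, assuming caching has
 * progressed to the 1GiB mark: a logged 128KiB extent starting at
 * 1GiB - 64KiB straddles the boundary, so the first 64KiB (already
 * cached) is removed from the free space cache and the remaining 64KiB
 * (not yet cached) is added to the excluded extents so the caching
 * thread will skip it.
 */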
6083
6084 int btrfs_exclude_logged_extents(struct btrfs_root *log,
6085                                  struct extent_buffer *eb)
6086 {
6087         struct btrfs_file_extent_item *item;
6088         struct btrfs_key key;
6089         int found_type;
6090         int i;
6091
6092         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
6093                 return 0;
6094
6095         for (i = 0; i < btrfs_header_nritems(eb); i++) {
6096                 btrfs_item_key_to_cpu(eb, &key, i);
6097                 if (key.type != BTRFS_EXTENT_DATA_KEY)
6098                         continue;
6099                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
6100                 found_type = btrfs_file_extent_type(eb, item);
6101                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
6102                         continue;
6103                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
6104                         continue;
6105                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
6106                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
6107                 __exclude_logged_extent(log, key.objectid, key.offset);
6108         }
6109
6110         return 0;
6111 }
6112
6113 /**
6114  * btrfs_update_reserved_bytes - update the block_group and space info counters
6115  * @cache:      The cache we are manipulating
6116  * @num_bytes:  The number of bytes in question
6117  * @reserve:    One of the reservation enums
6118  * @delalloc:   The blocks are allocated for the delalloc write
6119  *
6120  * This is called by the allocator when it reserves space, or by somebody who is
6121  * freeing space that was never actually used on disk.  For example if you
6122  * reserve some space for a new leaf in transaction A and before transaction A
6123  * commits you free that leaf, you call this with RESERVE_FREE in order to
6124  * clear the reservation.
6125  *
6126  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
6127  * ENOSPC accounting.  For data we handle the reservation through clearing the
6128  * delalloc bits in the io_tree.  We have to do this since we could end up
6129  * allocating less disk space for the amount of data we have reserved in the
6130  * case of compression.
6131  *
6132  * If this is a reservation and the block group has become read only we cannot
6133  * make the reservation and return -EAGAIN, otherwise this function always
6134  * succeeds.
6135  */
6136 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
6137                                        u64 num_bytes, int reserve, int delalloc)
6138 {
6139         struct btrfs_space_info *space_info = cache->space_info;
6140         int ret = 0;
6141
6142         spin_lock(&space_info->lock);
6143         spin_lock(&cache->lock);
6144         if (reserve != RESERVE_FREE) {
6145                 if (cache->ro) {
6146                         ret = -EAGAIN;
6147                 } else {
6148                         cache->reserved += num_bytes;
6149                         space_info->bytes_reserved += num_bytes;
6150                         if (reserve == RESERVE_ALLOC) {
6151                                 trace_btrfs_space_reservation(cache->fs_info,
6152                                                 "space_info", space_info->flags,
6153                                                 num_bytes, 0);
6154                                 space_info->bytes_may_use -= num_bytes;
6155                         }
6156
6157                         if (delalloc)
6158                                 cache->delalloc_bytes += num_bytes;
6159                 }
6160         } else {
6161                 if (cache->ro)
6162                         space_info->bytes_readonly += num_bytes;
6163                 cache->reserved -= num_bytes;
6164                 space_info->bytes_reserved -= num_bytes;
6165
6166                 if (delalloc)
6167                         cache->delalloc_bytes -= num_bytes;
6168         }
6169         spin_unlock(&cache->lock);
6170         spin_unlock(&space_info->lock);
6171         return ret;
6172 }
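
/*
 * Accounting sketch for an assumed 16KiB metadata allocation:
 * RESERVE_ALLOC moves 16KiB out of space_info->bytes_may_use and into
 * both cache->reserved and space_info->bytes_reserved;
 * RESERVE_ALLOC_NO_ACCOUNT does the same without touching
 * bytes_may_use; RESERVE_FREE walks cache->reserved and bytes_reserved
 * back down.  If the block group went read only in the meantime, the
 * reservation fails with -EAGAIN.
 */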
6173
6174 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
6175                                 struct btrfs_root *root)
6176 {
6177         struct btrfs_fs_info *fs_info = root->fs_info;
6178         struct btrfs_caching_control *next;
6179         struct btrfs_caching_control *caching_ctl;
6180         struct btrfs_block_group_cache *cache;
6181
6182         down_write(&fs_info->commit_root_sem);
6183
6184         list_for_each_entry_safe(caching_ctl, next,
6185                                  &fs_info->caching_block_groups, list) {
6186                 cache = caching_ctl->block_group;
6187                 if (block_group_cache_done(cache)) {
6188                         cache->last_byte_to_unpin = (u64)-1;
6189                         list_del_init(&caching_ctl->list);
6190                         put_caching_control(caching_ctl);
6191                 } else {
6192                         cache->last_byte_to_unpin = caching_ctl->progress;
6193                 }
6194         }
6195
6196         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6197                 fs_info->pinned_extents = &fs_info->freed_extents[1];
6198         else
6199                 fs_info->pinned_extents = &fs_info->freed_extents[0];
6200
6201         up_write(&fs_info->commit_root_sem);
6202
6203         update_global_block_rsv(fs_info);
6204 }
6205
6206 /*
6207  * Returns the free cluster for the given space info and sets empty_cluster to
6208  * what it should be based on the mount options.
6209  */
6210 static struct btrfs_free_cluster *
6211 fetch_cluster_info(struct btrfs_root *root, struct btrfs_space_info *space_info,
6212                    u64 *empty_cluster)
6213 {
6214         struct btrfs_free_cluster *ret = NULL;
6215         bool ssd = btrfs_test_opt(root, SSD);
6216
6217         *empty_cluster = 0;
6218         if (btrfs_mixed_space_info(space_info))
6219                 return ret;
6220
6221         if (ssd)
6222                 *empty_cluster = 2 * 1024 * 1024;
6223         if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
6224                 ret = &root->fs_info->meta_alloc_cluster;
6225                 if (!ssd)
6226                         *empty_cluster = 64 * 1024;
6227         } else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && ssd) {
6228                 ret = &root->fs_info->data_alloc_cluster;
6229         }
6230
6231         return ret;
6232 }
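
/*
 * The resulting policy, for reference: metadata clusters out of
 * meta_alloc_cluster in 2MiB chunks on SSD and 64KiB chunks otherwise;
 * data clusters out of data_alloc_cluster (2MiB) only on SSD; mixed
 * block groups and non-SSD data get no cluster at all.
 */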
6233
6234 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
6235                               const bool return_free_space)
6236 {
6237         struct btrfs_fs_info *fs_info = root->fs_info;
6238         struct btrfs_block_group_cache *cache = NULL;
6239         struct btrfs_space_info *space_info;
6240         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
6241         struct btrfs_free_cluster *cluster = NULL;
6242         u64 len;
6243         u64 total_unpinned = 0;
6244         u64 empty_cluster = 0;
6245         bool readonly;
6246
6247         while (start <= end) {
6248                 readonly = false;
6249                 if (!cache ||
6250                     start >= cache->key.objectid + cache->key.offset) {
6251                         if (cache)
6252                                 btrfs_put_block_group(cache);
6253                         total_unpinned = 0;
6254                         cache = btrfs_lookup_block_group(fs_info, start);
6255                         BUG_ON(!cache); /* Logic error */
6256
6257                         cluster = fetch_cluster_info(root,
6258                                                      cache->space_info,
6259                                                      &empty_cluster);
6260                         empty_cluster <<= 1;
6261                 }
6262
6263                 len = cache->key.objectid + cache->key.offset - start;
6264                 len = min(len, end + 1 - start);
6265
6266                 if (start < cache->last_byte_to_unpin) {
6267                         len = min(len, cache->last_byte_to_unpin - start);
6268                         if (return_free_space)
6269                                 btrfs_add_free_space(cache, start, len);
6270                 }
6271
6272                 start += len;
6273                 total_unpinned += len;
6274                 space_info = cache->space_info;
6275
6276                 /*
6277                  * If this space cluster has been marked as fragmented and we've
6278                  * unpinned enough in this block group to potentially allow a
6279                  * cluster to be created inside of it, go ahead and clear the
6280                  * fragmented flag.
6281                  */
6282                 if (cluster && cluster->fragmented &&
6283                     total_unpinned > empty_cluster) {
6284                         spin_lock(&cluster->lock);
6285                         cluster->fragmented = 0;
6286                         spin_unlock(&cluster->lock);
6287                 }
6288
6289                 spin_lock(&space_info->lock);
6290                 spin_lock(&cache->lock);
6291                 cache->pinned -= len;
6292                 space_info->bytes_pinned -= len;
6293                 space_info->max_extent_size = 0;
6294                 percpu_counter_add(&space_info->total_bytes_pinned, -len);
6295                 if (cache->ro) {
6296                         space_info->bytes_readonly += len;
6297                         readonly = true;
6298                 }
6299                 spin_unlock(&cache->lock);
6300                 if (!readonly && global_rsv->space_info == space_info) {
6301                         spin_lock(&global_rsv->lock);
6302                         if (!global_rsv->full) {
6303                                 len = min(len, global_rsv->size -
6304                                           global_rsv->reserved);
6305                                 global_rsv->reserved += len;
6306                                 space_info->bytes_may_use += len;
6307                                 if (global_rsv->reserved >= global_rsv->size)
6308                                         global_rsv->full = 1;
6309                         }
6310                         spin_unlock(&global_rsv->lock);
6311                 }
6312                 spin_unlock(&space_info->lock);
6313         }
6314
6315         if (cache)
6316                 btrfs_put_block_group(cache);
6317         return 0;
6318 }
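
/*
 * Example of the global reserve refill above, with assumed numbers: if
 * the global rsv is 256KiB short of its target size when 1MiB of
 * metadata is unpinned, 256KiB of that is steered back into
 * global_rsv->reserved (and counted in bytes_may_use), the reserve is
 * marked full, and the rest simply becomes free space in the block
 * group.
 */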
6319
6320 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
6321                                struct btrfs_root *root)
6322 {
6323         struct btrfs_fs_info *fs_info = root->fs_info;
6324         struct btrfs_block_group_cache *block_group, *tmp;
6325         struct list_head *deleted_bgs;
6326         struct extent_io_tree *unpin;
6327         u64 start;
6328         u64 end;
6329         int ret;
6330
6331         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6332                 unpin = &fs_info->freed_extents[1];
6333         else
6334                 unpin = &fs_info->freed_extents[0];
6335
6336         while (!trans->aborted) {
6337                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
6338                 ret = find_first_extent_bit(unpin, 0, &start, &end,
6339                                             EXTENT_DIRTY, NULL);
6340                 if (ret) {
6341                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6342                         break;
6343                 }
6344
6345                 if (btrfs_test_opt(root, DISCARD))
6346                         ret = btrfs_discard_extent(root, start,
6347                                                    end + 1 - start, NULL);
6348
6349                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
6350                 unpin_extent_range(root, start, end, true);
6351                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6352                 cond_resched();
6353         }
6354
6355         /*
6356          * Transaction is finished.  We don't need the lock anymore.  We
6357          * do need to clean up the block groups in case of a transaction
6358          * abort.
6359          */
6360         deleted_bgs = &trans->transaction->deleted_bgs;
6361         list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
6362                 u64 trimmed = 0;
6363
6364                 ret = -EROFS;
6365                 if (!trans->aborted)
6366                         ret = btrfs_discard_extent(root,
6367                                                    block_group->key.objectid,
6368                                                    block_group->key.offset,
6369                                                    &trimmed);
6370
6371                 list_del_init(&block_group->bg_list);
6372                 btrfs_put_block_group_trimming(block_group);
6373                 btrfs_put_block_group(block_group);
6374
6375                 if (ret) {
6376                         const char *errstr = btrfs_decode_error(ret);
6377                         btrfs_warn(fs_info,
6378                                    "discard failed while removing block group: errno=%d %s",
6379                                    ret, errstr);
6380                 }
6381         }
6382
6383         return 0;
6384 }
6385
6386 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
6387                              u64 owner, u64 root_objectid)
6388 {
6389         struct btrfs_space_info *space_info;
6390         u64 flags;
6391
6392         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6393                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
6394                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
6395                 else
6396                         flags = BTRFS_BLOCK_GROUP_METADATA;
6397         } else {
6398                 flags = BTRFS_BLOCK_GROUP_DATA;
6399         }
6400
6401         space_info = __find_space_info(fs_info, flags);
6402         BUG_ON(!space_info); /* Logic bug */
6403         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
6404 }
6405
6406
6407 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
6408                                 struct btrfs_root *root,
6409                                 struct btrfs_delayed_ref_node *node, u64 parent,
6410                                 u64 root_objectid, u64 owner_objectid,
6411                                 u64 owner_offset, int refs_to_drop,
6412                                 struct btrfs_delayed_extent_op *extent_op)
6413 {
6414         struct btrfs_key key;
6415         struct btrfs_path *path;
6416         struct btrfs_fs_info *info = root->fs_info;
6417         struct btrfs_root *extent_root = info->extent_root;
6418         struct extent_buffer *leaf;
6419         struct btrfs_extent_item *ei;
6420         struct btrfs_extent_inline_ref *iref;
6421         int ret;
6422         int is_data;
6423         int extent_slot = 0;
6424         int found_extent = 0;
6425         int num_to_del = 1;
6426         u32 item_size;
6427         u64 refs;
6428         u64 bytenr = node->bytenr;
6429         u64 num_bytes = node->num_bytes;
6430         int last_ref = 0;
6431         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6432                                                  SKINNY_METADATA);
6433
6434         path = btrfs_alloc_path();
6435         if (!path)
6436                 return -ENOMEM;
6437
6438         path->reada = 1;
6439         path->leave_spinning = 1;
6440
6441         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
6442         BUG_ON(!is_data && refs_to_drop != 1);
6443
6444         if (is_data)
6445                 skinny_metadata = 0;
6446
6447         ret = lookup_extent_backref(trans, extent_root, path, &iref,
6448                                     bytenr, num_bytes, parent,
6449                                     root_objectid, owner_objectid,
6450                                     owner_offset);
6451         if (ret == 0) {
6452                 extent_slot = path->slots[0];
6453                 while (extent_slot >= 0) {
6454                         btrfs_item_key_to_cpu(path->nodes[0], &key,
6455                                               extent_slot);
6456                         if (key.objectid != bytenr)
6457                                 break;
6458                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
6459                             key.offset == num_bytes) {
6460                                 found_extent = 1;
6461                                 break;
6462                         }
6463                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
6464                             key.offset == owner_objectid) {
6465                                 found_extent = 1;
6466                                 break;
6467                         }
6468                         if (path->slots[0] - extent_slot > 5)
6469                                 break;
6470                         extent_slot--;
6471                 }
6472 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6473                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
6474                 if (found_extent && item_size < sizeof(*ei))
6475                         found_extent = 0;
6476 #endif
6477                 if (!found_extent) {
6478                         BUG_ON(iref);
6479                         ret = remove_extent_backref(trans, extent_root, path,
6480                                                     NULL, refs_to_drop,
6481                                                     is_data, &last_ref);
6482                         if (ret) {
6483                                 btrfs_abort_transaction(trans, extent_root, ret);
6484                                 goto out;
6485                         }
6486                         btrfs_release_path(path);
6487                         path->leave_spinning = 1;
6488
6489                         key.objectid = bytenr;
6490                         key.type = BTRFS_EXTENT_ITEM_KEY;
6491                         key.offset = num_bytes;
6492
6493                         if (!is_data && skinny_metadata) {
6494                                 key.type = BTRFS_METADATA_ITEM_KEY;
6495                                 key.offset = owner_objectid;
6496                         }
6497
6498                         ret = btrfs_search_slot(trans, extent_root,
6499                                                 &key, path, -1, 1);
6500                         if (ret > 0 && skinny_metadata && path->slots[0]) {
6501                                 /*
6502                                  * Couldn't find our skinny metadata item,
6503                                  * see if we have ye olde extent item.
6504                                  */
6505                                 path->slots[0]--;
6506                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
6507                                                       path->slots[0]);
6508                                 if (key.objectid == bytenr &&
6509                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
6510                                     key.offset == num_bytes)
6511                                         ret = 0;
6512                         }
6513
6514                         if (ret > 0 && skinny_metadata) {
6515                                 skinny_metadata = false;
6516                                 key.objectid = bytenr;
6517                                 key.type = BTRFS_EXTENT_ITEM_KEY;
6518                                 key.offset = num_bytes;
6519                                 btrfs_release_path(path);
6520                                 ret = btrfs_search_slot(trans, extent_root,
6521                                                         &key, path, -1, 1);
6522                         }
6523
6524                         if (ret) {
6525                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6526                                         ret, bytenr);
6527                                 if (ret > 0)
6528                                         btrfs_print_leaf(extent_root,
6529                                                          path->nodes[0]);
6530                         }
6531                         if (ret < 0) {
6532                                 btrfs_abort_transaction(trans, extent_root, ret);
6533                                 goto out;
6534                         }
6535                         extent_slot = path->slots[0];
6536                 }
6537         } else if (WARN_ON(ret == -ENOENT)) {
6538                 btrfs_print_leaf(extent_root, path->nodes[0]);
6539                 btrfs_err(info,
6540                         "unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
6541                         bytenr, parent, root_objectid, owner_objectid,
6542                         owner_offset);
6543                 btrfs_abort_transaction(trans, extent_root, ret);
6544                 goto out;
6545         } else {
6546                 btrfs_abort_transaction(trans, extent_root, ret);
6547                 goto out;
6548         }
6549
6550         leaf = path->nodes[0];
6551         item_size = btrfs_item_size_nr(leaf, extent_slot);
6552 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6553         if (item_size < sizeof(*ei)) {
6554                 BUG_ON(found_extent || extent_slot != path->slots[0]);
6555                 ret = convert_extent_item_v0(trans, extent_root, path,
6556                                              owner_objectid, 0);
6557                 if (ret < 0) {
6558                         btrfs_abort_transaction(trans, extent_root, ret);
6559                         goto out;
6560                 }
6561
6562                 btrfs_release_path(path);
6563                 path->leave_spinning = 1;
6564
6565                 key.objectid = bytenr;
6566                 key.type = BTRFS_EXTENT_ITEM_KEY;
6567                 key.offset = num_bytes;
6568
6569                 ret = btrfs_search_slot(trans, extent_root, &key, path,
6570                                         -1, 1);
6571                 if (ret) {
6572                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6573                                 ret, bytenr);
6574                         btrfs_print_leaf(extent_root, path->nodes[0]);
6575                 }
6576                 if (ret < 0) {
6577                         btrfs_abort_transaction(trans, extent_root, ret);
6578                         goto out;
6579                 }
6580
6581                 extent_slot = path->slots[0];
6582                 leaf = path->nodes[0];
6583                 item_size = btrfs_item_size_nr(leaf, extent_slot);
6584         }
6585 #endif
6586         BUG_ON(item_size < sizeof(*ei));
6587         ei = btrfs_item_ptr(leaf, extent_slot,
6588                             struct btrfs_extent_item);
6589         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
6590             key.type == BTRFS_EXTENT_ITEM_KEY) {
6591                 struct btrfs_tree_block_info *bi;
6592                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
6593                 bi = (struct btrfs_tree_block_info *)(ei + 1);
6594                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
6595         }
6596
6597         refs = btrfs_extent_refs(leaf, ei);
6598         if (refs < refs_to_drop) {
6599                 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
6600                           "for bytenr %Lu", refs_to_drop, refs, bytenr);
6601                 ret = -EINVAL;
6602                 btrfs_abort_transaction(trans, extent_root, ret);
6603                 goto out;
6604         }
6605         refs -= refs_to_drop;
6606
6607         if (refs > 0) {
6608                 if (extent_op)
6609                         __run_delayed_extent_op(extent_op, leaf, ei);
6610                 /*
6611                  * In the case of inline back ref, reference count will
6612                  * be updated by remove_extent_backref
6613                  */
6614                 if (iref) {
6615                         BUG_ON(!found_extent);
6616                 } else {
6617                         btrfs_set_extent_refs(leaf, ei, refs);
6618                         btrfs_mark_buffer_dirty(leaf);
6619                 }
6620                 if (found_extent) {
6621                         ret = remove_extent_backref(trans, extent_root, path,
6622                                                     iref, refs_to_drop,
6623                                                     is_data, &last_ref);
6624                         if (ret) {
6625                                 btrfs_abort_transaction(trans, extent_root, ret);
6626                                 goto out;
6627                         }
6628                 }
6629                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
6630                                  root_objectid);
6631         } else {
6632                 if (found_extent) {
6633                         BUG_ON(is_data && refs_to_drop !=
6634                                extent_data_ref_count(path, iref));
6635                         if (iref) {
6636                                 BUG_ON(path->slots[0] != extent_slot);
6637                         } else {
6638                                 BUG_ON(path->slots[0] != extent_slot + 1);
6639                                 path->slots[0] = extent_slot;
6640                                 num_to_del = 2;
6641                         }
6642                 }
6643
6644                 last_ref = 1;
6645                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
6646                                       num_to_del);
6647                 if (ret) {
6648                         btrfs_abort_transaction(trans, extent_root, ret);
6649                         goto out;
6650                 }
6651                 btrfs_release_path(path);
6652
6653                 if (is_data) {
6654                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
6655                         if (ret) {
6656                                 btrfs_abort_transaction(trans, extent_root, ret);
6657                                 goto out;
6658                         }
6659                 }
6660
6661                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
6662                 if (ret) {
6663                         btrfs_abort_transaction(trans, extent_root, ret);
6664                         goto out;
6665                 }
6666         }
6667         btrfs_release_path(path);
6668
6669 out:
6670         btrfs_free_path(path);
6671         return ret;
6672 }
6673
6674 /*
6675  * when we free a block, it is possible (and likely) that we free the last
6676  * delayed ref for that extent as well.  This searches the delayed ref tree for
6677  * a given extent, and if there are no other delayed refs to be processed, it
6678  * removes the ref head from the tree.
6679  */
6680 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
6681                                       struct btrfs_root *root, u64 bytenr)
6682 {
6683         struct btrfs_delayed_ref_head *head;
6684         struct btrfs_delayed_ref_root *delayed_refs;
6685         int ret = 0;
6686
6687         delayed_refs = &trans->transaction->delayed_refs;
6688         spin_lock(&delayed_refs->lock);
6689         head = btrfs_find_delayed_ref_head(trans, bytenr);
6690         if (!head)
6691                 goto out_delayed_unlock;
6692
6693         spin_lock(&head->lock);
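             /*
              * if other delayed refs are still queued on this head, it cannot
              * be cleaned up early and must go through normal processing
              */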
6694         if (!list_empty(&head->ref_list))
6695                 goto out;
6696
6697         if (head->extent_op) {
6698                 if (!head->must_insert_reserved)
6699                         goto out;
6700                 btrfs_free_delayed_extent_op(head->extent_op);
6701                 head->extent_op = NULL;
6702         }
6703
6704         /*
6705          * waiting for the lock here would deadlock.  If someone else has it
6706          * locked, they are already in the process of dropping it anyway.
6707          */
6708         if (!mutex_trylock(&head->mutex))
6709                 goto out;
6710
6711         /*
6712          * at this point we have a head with no other entries.  Go
6713          * ahead and process it.
6714          */
6715         head->node.in_tree = 0;
6716         rb_erase(&head->href_node, &delayed_refs->href_root);
6717
6718         atomic_dec(&delayed_refs->num_entries);
6719
6720         /*
6721          * we don't take a ref on the node because we're removing it from the
6722          * tree, so we just steal the ref the tree was holding.
6723          */
6724         delayed_refs->num_heads--;
6725         if (head->processing == 0)
6726                 delayed_refs->num_heads_ready--;
6727         head->processing = 0;
6728         spin_unlock(&head->lock);
6729         spin_unlock(&delayed_refs->lock);
6730
6731         BUG_ON(head->extent_op);
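             /*
              * returning 1 tells the caller the extent was never inserted into
              * the extent tree, so it is safe to free the block directly
              */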
6732         if (head->must_insert_reserved)
6733                 ret = 1;
6734
6735         mutex_unlock(&head->mutex);
6736         btrfs_put_delayed_ref(&head->node);
6737         return ret;
6738 out:
6739         spin_unlock(&head->lock);
6740
6741 out_delayed_unlock:
6742         spin_unlock(&delayed_refs->lock);
6743         return 0;
6744 }
6745
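     /*
      * Drop a reference on a tree block.  This queues a delayed ref to drop
      * the block and, when this was the last reference and the block was
      * allocated in the current transaction, tries to return it to free space
      * immediately (or pin it, if it was already written) instead of leaving
      * it pinned until the transaction commits.
      */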
6746 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
6747                            struct btrfs_root *root,
6748                            struct extent_buffer *buf,
6749                            u64 parent, int last_ref)
6750 {
6751         int pin = 1;
6752         int ret;
6753
6754         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6755                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6756                                         buf->start, buf->len,
6757                                         parent, root->root_key.objectid,
6758                                         btrfs_header_level(buf),
6759                                         BTRFS_DROP_DELAYED_REF, NULL);
6760                 BUG_ON(ret); /* -ENOMEM */
6761         }
6762
6763         if (!last_ref)
6764                 return;
6765
6766         if (btrfs_header_generation(buf) == trans->transid) {
6767                 struct btrfs_block_group_cache *cache;
6768
6769                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6770                         ret = check_ref_cleanup(trans, root, buf->start);
6771                         if (!ret)
6772                                 goto out;
6773                 }
6774
6775                 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
6776
6777                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
6778                         pin_down_extent(root, cache, buf->start, buf->len, 1);
6779                         btrfs_put_block_group(cache);
6780                         goto out;
6781                 }
6782
6783                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
6784
6785                 btrfs_add_free_space(cache, buf->start, buf->len);
6786                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
6787                 btrfs_put_block_group(cache);
6788                 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
6789                 pin = 0;
6790         }
6791 out:
6792         if (pin)
6793                 add_pinned_bytes(root->fs_info, buf->len,
6794                                  btrfs_header_level(buf),
6795                                  root->root_key.objectid);
6796
6797         /*
6798          * We're deleting the buffer, so clear the corrupt flag since it
6799          * no longer matters.
6800          */
6801         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6802 }
6803
6804 /* Can return -ENOMEM */
6805 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6806                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6807                       u64 owner, u64 offset)
6808 {
6809         int ret;
6810         struct btrfs_fs_info *fs_info = root->fs_info;
6811
6812         if (btrfs_test_is_dummy_root(root))
6813                 return 0;
6814
6815         add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6816
6817         /*
6818          * tree log blocks never actually go into the extent allocation
6819          * tree, just update pinning info and exit early.
6820          */
6821         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6822                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6823                 /* unlocks the pinned mutex */
6824                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6825                 ret = 0;
6826         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6827                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6828                                         num_bytes,
6829                                         parent, root_objectid, (int)owner,
6830                                         BTRFS_DROP_DELAYED_REF, NULL);
6831         } else {
6832                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6833                                                 num_bytes,
6834                                                 parent, root_objectid, owner,
6835                                                 offset, 0,
6836                                                 BTRFS_DROP_DELAYED_REF, NULL);
6837         }
6838         return ret;
6839 }
6840
6841 /*
6842  * when we wait for progress in the block group caching, it's because
6843  * our allocation attempt failed at least once.  So, we must sleep
6844  * and let some progress happen before we try again.
6845  *
6846  * This function will sleep at least once waiting for new free space to
6847  * show up, and then it will check the block group free space numbers
6848  * for our min num_bytes.  Another option is to have it go ahead
6849  * and look in the rbtree for a free extent of a given size, but this
6850  * is a good start.
6851  *
6852  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6853  * any of the information in this block group.
6854  */
6855 static noinline void
6856 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6857                                 u64 num_bytes)
6858 {
6859         struct btrfs_caching_control *caching_ctl;
6860
6861         caching_ctl = get_caching_control(cache);
6862         if (!caching_ctl)
6863                 return;
6864
6865         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6866                    (cache->free_space_ctl->free_space >= num_bytes));
6867
6868         put_caching_control(caching_ctl);
6869 }
6870
6871 static noinline int
6872 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6873 {
6874         struct btrfs_caching_control *caching_ctl;
6875         int ret = 0;
6876
6877         caching_ctl = get_caching_control(cache);
6878         if (!caching_ctl)
6879                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6880
6881         wait_event(caching_ctl->wait, block_group_cache_done(cache));
6882         if (cache->cached == BTRFS_CACHE_ERROR)
6883                 ret = -EIO;
6884         put_caching_control(caching_ctl);
6885         return ret;
6886 }
6887
6888 int __get_raid_index(u64 flags)
6889 {
6890         if (flags & BTRFS_BLOCK_GROUP_RAID10)
6891                 return BTRFS_RAID_RAID10;
6892         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6893                 return BTRFS_RAID_RAID1;
6894         else if (flags & BTRFS_BLOCK_GROUP_DUP)
6895                 return BTRFS_RAID_DUP;
6896         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6897                 return BTRFS_RAID_RAID0;
6898         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6899                 return BTRFS_RAID_RAID5;
6900         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6901                 return BTRFS_RAID_RAID6;
6902
6903         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6904 }
6905
6906 int get_block_group_index(struct btrfs_block_group_cache *cache)
6907 {
6908         return __get_raid_index(cache->flags);
6909 }
6910
6911 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6912         [BTRFS_RAID_RAID10]     = "raid10",
6913         [BTRFS_RAID_RAID1]      = "raid1",
6914         [BTRFS_RAID_DUP]        = "dup",
6915         [BTRFS_RAID_RAID0]      = "raid0",
6916         [BTRFS_RAID_SINGLE]     = "single",
6917         [BTRFS_RAID_RAID5]      = "raid5",
6918         [BTRFS_RAID_RAID6]      = "raid6",
6919 };
6920
6921 static const char *get_raid_name(enum btrfs_raid_types type)
6922 {
6923         if (type >= BTRFS_NR_RAID_TYPES)
6924                 return NULL;
6925
6926         return btrfs_raid_type_names[type];
6927 }
6928
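     /*
      * Stages for find_free_extent().  Each failed pass over the block groups
      * escalates to the next, more expensive stage; see the stage comment in
      * find_free_extent() for what each one does.
      */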
6929 enum btrfs_loop_type {
6930         LOOP_CACHING_NOWAIT = 0,
6931         LOOP_CACHING_WAIT = 1,
6932         LOOP_ALLOC_CHUNK = 2,
6933         LOOP_NO_EMPTY_SIZE = 3,
6934 };
6935
6936 static inline void
6937 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
6938                        int delalloc)
6939 {
6940         if (delalloc)
6941                 down_read(&cache->data_rwsem);
6942 }
6943
6944 static inline void
6945 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
6946                        int delalloc)
6947 {
6948         btrfs_get_block_group(cache);
6949         if (delalloc)
6950                 down_read(&cache->data_rwsem);
6951 }
6952
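     /*
      * Grab the block group the cluster currently points at, taking
      * data_rwsem for delalloc allocations.  Blocking on data_rwsem forces us
      * to drop the cluster's refill_lock, and by the time we retake it the
      * cluster may point at a different block group, hence the retry loop.
      */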
6953 static struct btrfs_block_group_cache *
6954 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
6955                    struct btrfs_free_cluster *cluster,
6956                    int delalloc)
6957 {
6958         struct btrfs_block_group_cache *used_bg;
6959         bool locked = false;
6960 again:
6961         spin_lock(&cluster->refill_lock);
6962         if (locked) {
6963                 if (used_bg == cluster->block_group)
6964                         return used_bg;
6965
6966                 up_read(&used_bg->data_rwsem);
6967                 btrfs_put_block_group(used_bg);
6968         }
6969
6970         used_bg = cluster->block_group;
6971         if (!used_bg)
6972                 return NULL;
6973
6974         if (used_bg == block_group)
6975                 return used_bg;
6976
6977         btrfs_get_block_group(used_bg);
6978
6979         if (!delalloc)
6980                 return used_bg;
6981
6982         if (down_read_trylock(&used_bg->data_rwsem))
6983                 return used_bg;
6984
6985         spin_unlock(&cluster->refill_lock);
6986         down_read(&used_bg->data_rwsem);
6987         locked = true;
6988         goto again;
6989 }
6990
6991 static inline void
6992 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
6993                          int delalloc)
6994 {
6995         if (delalloc)
6996                 up_read(&cache->data_rwsem);
6997         btrfs_put_block_group(cache);
6998 }
6999
7000 /*
7001  * walks the btree of allocated extents and finds a hole of a given size.
7002  * The key ins is changed to record the hole:
7003  * ins->objectid == start position
7004  * ins->type == BTRFS_EXTENT_ITEM_KEY
7005  * ins->offset == the size of the hole.
7006  * Any available blocks before search_start are skipped.
7007  *
7008  * If there is no suitable free space, we record the size of the largest
7009  * free extent we saw, so the caller can retry with a smaller request.
7010  */
7011 static noinline int find_free_extent(struct btrfs_root *orig_root,
7012                                      u64 num_bytes, u64 empty_size,
7013                                      u64 hint_byte, struct btrfs_key *ins,
7014                                      u64 flags, int delalloc)
7015 {
7016         int ret = 0;
7017         struct btrfs_root *root = orig_root->fs_info->extent_root;
7018         struct btrfs_free_cluster *last_ptr = NULL;
7019         struct btrfs_block_group_cache *block_group = NULL;
7020         u64 search_start = 0;
7021         u64 max_extent_size = 0;
7022         u64 empty_cluster = 0;
7023         struct btrfs_space_info *space_info;
7024         int loop = 0;
7025         int index = __get_raid_index(flags);
7026         int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
7027                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
7028         bool failed_cluster_refill = false;
7029         bool failed_alloc = false;
7030         bool use_cluster = true;
7031         bool have_caching_bg = false;
7032         bool orig_have_caching_bg = false;
7033         bool full_search = false;
7034
7035         WARN_ON(num_bytes < root->sectorsize);
7036         ins->type = BTRFS_EXTENT_ITEM_KEY;
7037         ins->objectid = 0;
7038         ins->offset = 0;
7039
7040         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
7041
7042         space_info = __find_space_info(root->fs_info, flags);
7043         if (!space_info) {
7044                 btrfs_err(root->fs_info, "No space info for %llu", flags);
7045                 return -ENOSPC;
7046         }
7047
7048         /*
7049          * If our free space is heavily fragmented we may not be able to make
7050          * big contiguous allocations, so instead of doing the expensive search
7051          * for free space, simply return ENOSPC with our max_extent_size so we
7052          * can go ahead and search for a more manageable chunk.
7053          *
7054          * If our max_extent_size is large enough for our allocation, simply
7055          * disable clustering, since we will likely not be able to find enough
7056          * space to create a cluster and would only induce latency trying.
7057          */
7058         if (unlikely(space_info->max_extent_size)) {
7059                 spin_lock(&space_info->lock);
7060                 if (space_info->max_extent_size &&
7061                     num_bytes > space_info->max_extent_size) {
7062                         ins->offset = space_info->max_extent_size;
7063                         spin_unlock(&space_info->lock);
7064                         return -ENOSPC;
7065                 } else if (space_info->max_extent_size) {
7066                         use_cluster = false;
7067                 }
7068                 spin_unlock(&space_info->lock);
7069         }
7070
7071         last_ptr = fetch_cluster_info(orig_root, space_info, &empty_cluster);
7072         if (last_ptr) {
7073                 spin_lock(&last_ptr->lock);
7074                 if (last_ptr->block_group)
7075                         hint_byte = last_ptr->window_start;
7076                 if (last_ptr->fragmented) {
7077                         /*
7078                          * We still set window_start so we can keep track of the
7079                          * last place we found an allocation to try and save
7080                          * some time.
7081                          */
7082                         hint_byte = last_ptr->window_start;
7083                         use_cluster = false;
7084                 }
7085                 spin_unlock(&last_ptr->lock);
7086         }
7087
7088         search_start = max(search_start, first_logical_byte(root, 0));
7089         search_start = max(search_start, hint_byte);
7090         if (search_start == hint_byte) {
7091                 block_group = btrfs_lookup_block_group(root->fs_info,
7092                                                        search_start);
7093                 /*
7094                  * we don't want to use the block group if it doesn't match our
7095                  * allocation bits, or if it's not cached.
7096                  *
7097                  * However, if we are re-searching with an ideal block group
7098                  * picked out, we don't care whether the block group is cached.
7099                  */
7100                 if (block_group && block_group_bits(block_group, flags) &&
7101                     block_group->cached != BTRFS_CACHE_NO) {
7102                         down_read(&space_info->groups_sem);
7103                         if (list_empty(&block_group->list) ||
7104                             block_group->ro) {
7105                                 /*
7106                                  * someone is removing this block group;
7107                                  * we can't jump to the have_block_group
7108                                  * label because our list pointers are
7109                                  * not valid
7110                                  */
7111                                 btrfs_put_block_group(block_group);
7112                                 up_read(&space_info->groups_sem);
7113                         } else {
7114                                 index = get_block_group_index(block_group);
7115                                 btrfs_lock_block_group(block_group, delalloc);
7116                                 goto have_block_group;
7117                         }
7118                 } else if (block_group) {
7119                         btrfs_put_block_group(block_group);
7120                 }
7121         }
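             /*
              * Main loop: walk the block groups of the current raid index,
              * trying the cluster allocator first and falling back to an
              * unclustered allocation.  A failed full pass moves on to the
              * next raid index or escalates the loop stage (see enum
              * btrfs_loop_type).
              */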
7122 search:
7123         have_caching_bg = false;
7124         if (index == 0 || index == __get_raid_index(flags))
7125                 full_search = true;
7126         down_read(&space_info->groups_sem);
7127         list_for_each_entry(block_group, &space_info->block_groups[index],
7128                             list) {
7129                 u64 offset;
7130                 int cached;
7131
7132                 btrfs_grab_block_group(block_group, delalloc);
7133                 search_start = block_group->key.objectid;
7134
7135                 /*
7136                  * this can happen if we end up cycling through all the
7137                  * raid types, but we want to make sure we only allocate
7138                  * for the proper type.
7139                  */
7140                 if (!block_group_bits(block_group, flags)) {
7141                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
7142                                     BTRFS_BLOCK_GROUP_RAID1 |
7143                                     BTRFS_BLOCK_GROUP_RAID5 |
7144                                     BTRFS_BLOCK_GROUP_RAID6 |
7145                                     BTRFS_BLOCK_GROUP_RAID10;
7146
7147                         /*
7148                          * if they asked for extra copies and this block group
7149                          * doesn't provide them, bail.  This does allow us to
7150                          * fill raid0 from raid1.
7151                          */
7152                         if ((flags & extra) && !(block_group->flags & extra))
7153                                 goto loop;
7154                 }
7155
7156 have_block_group:
7157                 cached = block_group_cache_done(block_group);
7158                 if (unlikely(!cached)) {
7159                         have_caching_bg = true;
7160                         ret = cache_block_group(block_group, 0);
7161                         BUG_ON(ret < 0);
7162                         ret = 0;
7163                 }
7164
7165                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
7166                         goto loop;
7167                 if (unlikely(block_group->ro))
7168                         goto loop;
7169
7170                 /*
7171                  * Ok, we want to try to use the cluster allocator, so
7172                  * let's look there.
7173                  */
7174                 if (last_ptr && use_cluster) {
7175                         struct btrfs_block_group_cache *used_block_group;
7176                         unsigned long aligned_cluster;
7177                         /*
7178                          * the refill lock keeps out other
7179                          * people trying to start a new cluster
7180                          */
7181                         used_block_group = btrfs_lock_cluster(block_group,
7182                                                               last_ptr,
7183                                                               delalloc);
7184                         if (!used_block_group)
7185                                 goto refill_cluster;
7186
7187                         if (used_block_group != block_group &&
7188                             (used_block_group->ro ||
7189                              !block_group_bits(used_block_group, flags)))
7190                                 goto release_cluster;
7191
7192                         offset = btrfs_alloc_from_cluster(used_block_group,
7193                                                 last_ptr,
7194                                                 num_bytes,
7195                                                 used_block_group->key.objectid,
7196                                                 &max_extent_size);
7197                         if (offset) {
7198                                 /* we have a block, we're done */
7199                                 spin_unlock(&last_ptr->refill_lock);
7200                                 trace_btrfs_reserve_extent_cluster(root,
7201                                                 used_block_group,
7202                                                 search_start, num_bytes);
7203                                 if (used_block_group != block_group) {
7204                                         btrfs_release_block_group(block_group,
7205                                                                   delalloc);
7206                                         block_group = used_block_group;
7207                                 }
7208                                 goto checks;
7209                         }
7210
7211                         WARN_ON(last_ptr->block_group != used_block_group);
7212 release_cluster:
7213                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
7214                          * set up a new cluster, so let's just skip it
7215                          * and let the allocator find whatever block
7216                          * it can find.  If we reach this point, we
7217                          * will have tried the cluster allocator
7218                          * plenty of times and not have found
7219                          * anything, so we are likely way too
7220                          * fragmented for the clustering stuff to find
7221                          * anything.
7222                          *
7223                          * However, if the cluster is taken from the
7224                          * current block group, release the cluster
7225                          * first, so that we stand a better chance of
7226                          * succeeding in the unclustered
7227                          * allocation.  */
7228                         if (loop >= LOOP_NO_EMPTY_SIZE &&
7229                             used_block_group != block_group) {
7230                                 spin_unlock(&last_ptr->refill_lock);
7231                                 btrfs_release_block_group(used_block_group,
7232                                                           delalloc);
7233                                 goto unclustered_alloc;
7234                         }
7235
7236                         /*
7237                          * this cluster didn't work out, free it and
7238                          * start over
7239                          */
7240                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7241
7242                         if (used_block_group != block_group)
7243                                 btrfs_release_block_group(used_block_group,
7244                                                           delalloc);
7245 refill_cluster:
7246                         if (loop >= LOOP_NO_EMPTY_SIZE) {
7247                                 spin_unlock(&last_ptr->refill_lock);
7248                                 goto unclustered_alloc;
7249                         }
7250
7251                         aligned_cluster = max_t(unsigned long,
7252                                                 empty_cluster + empty_size,
7253                                               block_group->full_stripe_len);
7254
7255                         /* allocate a cluster in this block group */
7256                         ret = btrfs_find_space_cluster(root, block_group,
7257                                                        last_ptr, search_start,
7258                                                        num_bytes,
7259                                                        aligned_cluster);
7260                         if (ret == 0) {
7261                                 /*
7262                                  * now pull our allocation out of this
7263                                  * cluster
7264                                  */
7265                                 offset = btrfs_alloc_from_cluster(block_group,
7266                                                         last_ptr,
7267                                                         num_bytes,
7268                                                         search_start,
7269                                                         &max_extent_size);
7270                                 if (offset) {
7271                                         /* we found one, proceed */
7272                                         spin_unlock(&last_ptr->refill_lock);
7273                                         trace_btrfs_reserve_extent_cluster(root,
7274                                                 block_group, search_start,
7275                                                 num_bytes);
7276                                         goto checks;
7277                                 }
7278                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
7279                                    && !failed_cluster_refill) {
7280                                 spin_unlock(&last_ptr->refill_lock);
7281
7282                                 failed_cluster_refill = true;
7283                                 wait_block_group_cache_progress(block_group,
7284                                        num_bytes + empty_cluster + empty_size);
7285                                 goto have_block_group;
7286                         }
7287
7288                         /*
7289                          * at this point we either didn't find a cluster
7290                          * or we weren't able to allocate a block from our
7291                          * cluster.  Free the cluster we've been trying
7292                          * to use, and go to the next block group
7293                          */
7294                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7295                         spin_unlock(&last_ptr->refill_lock);
7296                         goto loop;
7297                 }
7298
7299 unclustered_alloc:
7300                 /*
7301                  * We are doing an unclustered alloc, set the fragmented flag so
7302                  * we don't bother trying to set up a cluster again until we get
7303                  * more space.
7304                  */
7305                 if (unlikely(last_ptr)) {
7306                         spin_lock(&last_ptr->lock);
7307                         last_ptr->fragmented = 1;
7308                         spin_unlock(&last_ptr->lock);
7309                 }
7310                 spin_lock(&block_group->free_space_ctl->tree_lock);
7311                 if (cached &&
7312                     block_group->free_space_ctl->free_space <
7313                     num_bytes + empty_cluster + empty_size) {
7314                         if (block_group->free_space_ctl->free_space >
7315                             max_extent_size)
7316                                 max_extent_size =
7317                                         block_group->free_space_ctl->free_space;
7318                         spin_unlock(&block_group->free_space_ctl->tree_lock);
7319                         goto loop;
7320                 }
7321                 spin_unlock(&block_group->free_space_ctl->tree_lock);
7322
7323                 offset = btrfs_find_space_for_alloc(block_group, search_start,
7324                                                     num_bytes, empty_size,
7325                                                     &max_extent_size);
7326                 /*
7327                  * If we didn't find a chunk, and we haven't failed on this
7328                  * block group before, and this block group is in the middle of
7329                  * caching and we are ok with waiting, then go ahead and wait
7330                  * for progress to be made, and set failed_alloc to true.
7331                  *
7332                  * If failed_alloc is true then we've already waited on this
7333                  * block group once and should move on to the next block group.
7334                  */
7335                 if (!offset && !failed_alloc && !cached &&
7336                     loop > LOOP_CACHING_NOWAIT) {
7337                         wait_block_group_cache_progress(block_group,
7338                                                 num_bytes + empty_size);
7339                         failed_alloc = true;
7340                         goto have_block_group;
7341                 } else if (!offset) {
7342                         goto loop;
7343                 }
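                     /*
                      * We found space at 'offset': align it to the stripe
                      * size, make sure the aligned result still fits in this
                      * block group, and then reserve the bytes.  -EAGAIN puts
                      * the space back in the free space cache and moves on.
                      */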
7344 checks:
7345                 search_start = ALIGN(offset, root->stripesize);
7346
7347                 /* move on to the next group */
7348                 if (search_start + num_bytes >
7349                     block_group->key.objectid + block_group->key.offset) {
7350                         btrfs_add_free_space(block_group, offset, num_bytes);
7351                         goto loop;
7352                 }
7353
7354                 if (offset < search_start)
7355                         btrfs_add_free_space(block_group, offset,
7356                                              search_start - offset);
7357                 BUG_ON(offset > search_start);
7358
7359                 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
7360                                                   alloc_type, delalloc);
7361                 if (ret == -EAGAIN) {
7362                         btrfs_add_free_space(block_group, offset, num_bytes);
7363                         goto loop;
7364                 }
7365
7366                 /* we are all good, let's return */
7367                 ins->objectid = search_start;
7368                 ins->offset = num_bytes;
7369
7370                 trace_btrfs_reserve_extent(orig_root, block_group,
7371                                            search_start, num_bytes);
7372                 btrfs_release_block_group(block_group, delalloc);
7373                 break;
7374 loop:
7375                 failed_cluster_refill = false;
7376                 failed_alloc = false;
7377                 BUG_ON(index != get_block_group_index(block_group));
7378                 btrfs_release_block_group(block_group, delalloc);
7379         }
7380         up_read(&space_info->groups_sem);
7381
7382         if ((loop == LOOP_CACHING_NOWAIT) && have_caching_bg
7383                 && !orig_have_caching_bg)
7384                 orig_have_caching_bg = true;
7385
7386         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
7387                 goto search;
7388
7389         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
7390                 goto search;
7391
7392         /*
7393          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
7394          *                      caching kthreads as we move along
7395          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
7396          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
7397          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
7398          *                      again
7399          */
7400         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
7401                 index = 0;
7402                 if (loop == LOOP_CACHING_NOWAIT) {
7403                         /*
7404                          * We want to skip the LOOP_CACHING_WAIT step if we
7405                          * don't have any uncached bgs and we've already done a
7406                          * full search through all of them.
7407                          */
7408                         if (orig_have_caching_bg || !full_search)
7409                                 loop = LOOP_CACHING_WAIT;
7410                         else
7411                                 loop = LOOP_ALLOC_CHUNK;
7412                 } else {
7413                         loop++;
7414                 }
7415
7416                 if (loop == LOOP_ALLOC_CHUNK) {
7417                         struct btrfs_trans_handle *trans;
7418                         int exist = 0;
7419
7420                         trans = current->journal_info;
7421                         if (trans)
7422                                 exist = 1;
7423                         else
7424                                 trans = btrfs_join_transaction(root);
7425
7426                         if (IS_ERR(trans)) {
7427                                 ret = PTR_ERR(trans);
7428                                 goto out;
7429                         }
7430
7431                         ret = do_chunk_alloc(trans, root, flags,
7432                                              CHUNK_ALLOC_FORCE);
7433
7434                         /*
7435                          * If we can't allocate a new chunk, we've already
7436                          * looped through at least once, so move on to the
7437                          * NO_EMPTY_SIZE case.
7438                          */
7439                         if (ret == -ENOSPC)
7440                                 loop = LOOP_NO_EMPTY_SIZE;
7441
7442                         /*
7443                          * Do not bail out on ENOSPC, since there are
7444                          * still more things we can try.
7445                          */
7446                         if (ret < 0 && ret != -ENOSPC)
7447                                 btrfs_abort_transaction(trans,
7448                                                         root, ret);
7449                         else
7450                                 ret = 0;
7451                         if (!exist)
7452                                 btrfs_end_transaction(trans, root);
7453                         if (ret)
7454                                 goto out;
7455                 }
7456
7457                 if (loop == LOOP_NO_EMPTY_SIZE) {
7458                         /*
7459                          * Don't loop again if we already have no empty_size and
7460                          * no empty_cluster.
7461                          */
7462                         if (empty_size == 0 &&
7463                             empty_cluster == 0) {
7464                                 ret = -ENOSPC;
7465                                 goto out;
7466                         }
7467                         empty_size = 0;
7468                         empty_cluster = 0;
7469                 }
7470
7471                 goto search;
7472         } else if (!ins->objectid) {
7473                 ret = -ENOSPC;
7474         } else if (ins->objectid) {
7475                 if (!use_cluster && last_ptr) {
7476                         spin_lock(&last_ptr->lock);
7477                         last_ptr->window_start = ins->objectid;
7478                         spin_unlock(&last_ptr->lock);
7479                 }
7480                 ret = 0;
7481         }
7482 out:
7483         if (ret == -ENOSPC) {
7484                 spin_lock(&space_info->lock);
7485                 space_info->max_extent_size = max_extent_size;
7486                 spin_unlock(&space_info->lock);
7487                 ins->offset = max_extent_size;
7488         }
7489         return ret;
7490 }
7491
7492 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
7493                             int dump_block_groups)
7494 {
7495         struct btrfs_block_group_cache *cache;
7496         int index = 0;
7497
7498         spin_lock(&info->lock);
7499         printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
7500                info->flags,
7501                info->total_bytes - info->bytes_used - info->bytes_pinned -
7502                info->bytes_reserved - info->bytes_readonly,
7503                (info->full) ? "" : "not ");
7504         printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
7505                "reserved=%llu, may_use=%llu, readonly=%llu\n",
7506                info->total_bytes, info->bytes_used, info->bytes_pinned,
7507                info->bytes_reserved, info->bytes_may_use,
7508                info->bytes_readonly);
7509         spin_unlock(&info->lock);
7510
7511         if (!dump_block_groups)
7512                 return;
7513
7514         down_read(&info->groups_sem);
7515 again:
7516         list_for_each_entry(cache, &info->block_groups[index], list) {
7517                 spin_lock(&cache->lock);
7518                 printk(KERN_INFO "BTRFS: "
7519                            "block group %llu has %llu bytes, "
7520                            "%llu used %llu pinned %llu reserved %s\n",
7521                        cache->key.objectid, cache->key.offset,
7522                        btrfs_block_group_used(&cache->item), cache->pinned,
7523                        cache->reserved, cache->ro ? "[readonly]" : "");
7524                 btrfs_dump_free_space(cache, bytes);
7525                 spin_unlock(&cache->lock);
7526         }
7527         if (++index < BTRFS_NR_RAID_TYPES)
7528                 goto again;
7529         up_read(&info->groups_sem);
7530 }
7531
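     /*
      * Reserve an extent of at most num_bytes and at least min_alloc_size.
      * On ENOSPC we retry with smaller sizes: each attempt halves num_bytes
      * (clamped to the largest free extent reported back in ins->offset)
      * until we reach min_alloc_size, which is the final attempt.
      */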
7532 int btrfs_reserve_extent(struct btrfs_root *root,
7533                          u64 num_bytes, u64 min_alloc_size,
7534                          u64 empty_size, u64 hint_byte,
7535                          struct btrfs_key *ins, int is_data, int delalloc)
7536 {
7537         bool final_tried = num_bytes == min_alloc_size;
7538         u64 flags;
7539         int ret;
7540
7541         flags = btrfs_get_alloc_profile(root, is_data);
7542 again:
7543         WARN_ON(num_bytes < root->sectorsize);
7544         ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
7545                                flags, delalloc);
7546
7547         if (ret == -ENOSPC) {
7548                 if (!final_tried && ins->offset) {
7549                         num_bytes = min(num_bytes >> 1, ins->offset);
7550                         num_bytes = round_down(num_bytes, root->sectorsize);
7551                         num_bytes = max(num_bytes, min_alloc_size);
7552                         if (num_bytes == min_alloc_size)
7553                                 final_tried = true;
7554                         goto again;
7555                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7556                         struct btrfs_space_info *sinfo;
7557
7558                         sinfo = __find_space_info(root->fs_info, flags);
7559                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
7560                                 flags, num_bytes);
7561                         if (sinfo)
7562                                 dump_space_info(sinfo, num_bytes, 1);
7563                 }
7564         }
7565
7566         return ret;
7567 }
7568
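     /*
      * Return a reserved extent that ended up unused.  With pin set, the
      * range stays pinned until the transaction commits; otherwise it goes
      * straight back into the free space cache, after an optional discard
      * when the DISCARD mount option is set.
      */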
7569 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
7570                                         u64 start, u64 len,
7571                                         int pin, int delalloc)
7572 {
7573         struct btrfs_block_group_cache *cache;
7574         int ret = 0;
7575
7576         cache = btrfs_lookup_block_group(root->fs_info, start);
7577         if (!cache) {
7578                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
7579                         start);
7580                 return -ENOSPC;
7581         }
7582
7583         if (pin)
7584                 pin_down_extent(root, cache, start, len, 1);
7585         else {
7586                 if (btrfs_test_opt(root, DISCARD))
7587                         ret = btrfs_discard_extent(root, start, len, NULL);
7588                 btrfs_add_free_space(cache, start, len);
7589                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
7590         }
7591
7592         btrfs_put_block_group(cache);
7593
7594         trace_btrfs_reserved_extent_free(root, start, len);
7595
7596         return ret;
7597 }
7598
7599 int btrfs_free_reserved_extent(struct btrfs_root *root,
7600                                u64 start, u64 len, int delalloc)
7601 {
7602         return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
7603 }
7604
7605 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
7606                                        u64 start, u64 len)
7607 {
7608         return __btrfs_free_reserved_extent(root, start, len, 1, 0);
7609 }
7610
7611 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7612                                       struct btrfs_root *root,
7613                                       u64 parent, u64 root_objectid,
7614                                       u64 flags, u64 owner, u64 offset,
7615                                       struct btrfs_key *ins, int ref_mod)
7616 {
7617         int ret;
7618         struct btrfs_fs_info *fs_info = root->fs_info;
7619         struct btrfs_extent_item *extent_item;
7620         struct btrfs_extent_inline_ref *iref;
7621         struct btrfs_path *path;
7622         struct extent_buffer *leaf;
7623         int type;
7624         u32 size;
7625
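             /*
              * a non-zero parent means the extent is shared, so we record a
              * shared data ref keyed on the parent tree block; otherwise a
              * normal data ref keyed on root/owner/offset is used
              */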
7626         if (parent > 0)
7627                 type = BTRFS_SHARED_DATA_REF_KEY;
7628         else
7629                 type = BTRFS_EXTENT_DATA_REF_KEY;
7630
7631         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7632
7633         path = btrfs_alloc_path();
7634         if (!path)
7635                 return -ENOMEM;
7636
7637         path->leave_spinning = 1;
7638         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7639                                       ins, size);
7640         if (ret) {
7641                 btrfs_free_path(path);
7642                 return ret;
7643         }
7644
7645         leaf = path->nodes[0];
7646         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7647                                      struct btrfs_extent_item);
7648         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
7649         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7650         btrfs_set_extent_flags(leaf, extent_item,
7651                                flags | BTRFS_EXTENT_FLAG_DATA);
7652
7653         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7654         btrfs_set_extent_inline_ref_type(leaf, iref, type);
7655         if (parent > 0) {
7656                 struct btrfs_shared_data_ref *ref;
7657                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
7658                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7659                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
7660         } else {
7661                 struct btrfs_extent_data_ref *ref;
7662                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
7663                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
7664                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
7665                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
7666                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
7667         }
7668
7669         btrfs_mark_buffer_dirty(path->nodes[0]);
7670         btrfs_free_path(path);
7671
7672         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
7673         if (ret) { /* -ENOENT, logic error */
7674                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7675                         ins->objectid, ins->offset);
7676                 BUG();
7677         }
7678         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
7679         return ret;
7680 }
7681
7682 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
7683                                      struct btrfs_root *root,
7684                                      u64 parent, u64 root_objectid,
7685                                      u64 flags, struct btrfs_disk_key *key,
7686                                      int level, struct btrfs_key *ins)
7687 {
7688         int ret;
7689         struct btrfs_fs_info *fs_info = root->fs_info;
7690         struct btrfs_extent_item *extent_item;
7691         struct btrfs_tree_block_info *block_info;
7692         struct btrfs_extent_inline_ref *iref;
7693         struct btrfs_path *path;
7694         struct extent_buffer *leaf;
7695         u32 size = sizeof(*extent_item) + sizeof(*iref);
7696         u64 num_bytes = ins->offset;
7697         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7698                                                  SKINNY_METADATA);
7699
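             /*
              * with the SKINNY_METADATA incompat feature the key itself
              * carries the block level in its offset field, so no
              * btrfs_tree_block_info needs to be stored and the extent item
              * is smaller
              */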
7700         if (!skinny_metadata)
7701                 size += sizeof(*block_info);
7702
7703         path = btrfs_alloc_path();
7704         if (!path) {
7705                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7706                                                    root->nodesize);
7707                 return -ENOMEM;
7708         }
7709
7710         path->leave_spinning = 1;
7711         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7712                                       ins, size);
7713         if (ret) {
7714                 btrfs_free_path(path);
7715                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7716                                                    root->nodesize);
7717                 return ret;
7718         }
7719
7720         leaf = path->nodes[0];
7721         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7722                                      struct btrfs_extent_item);
7723         btrfs_set_extent_refs(leaf, extent_item, 1);
7724         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7725         btrfs_set_extent_flags(leaf, extent_item,
7726                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
7727
7728         if (skinny_metadata) {
7729                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7730                 num_bytes = root->nodesize;
7731         } else {
7732                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
7733                 btrfs_set_tree_block_key(leaf, block_info, key);
7734                 btrfs_set_tree_block_level(leaf, block_info, level);
7735                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
7736         }
7737
7738         if (parent > 0) {
7739                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
7740                 btrfs_set_extent_inline_ref_type(leaf, iref,
7741                                                  BTRFS_SHARED_BLOCK_REF_KEY);
7742                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7743         } else {
7744                 btrfs_set_extent_inline_ref_type(leaf, iref,
7745                                                  BTRFS_TREE_BLOCK_REF_KEY);
7746                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
7747         }
7748
7749         btrfs_mark_buffer_dirty(leaf);
7750         btrfs_free_path(path);
7751
7752         ret = update_block_group(trans, root, ins->objectid, root->nodesize,
7753                                  1);
7754         if (ret) { /* -ENOENT, logic error */
7755                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7756                         ins->objectid, ins->offset);
7757                 BUG();
7758         }
7759
7760         trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
7761         return ret;
7762 }
7763
7764 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7765                                      struct btrfs_root *root,
7766                                      u64 root_objectid, u64 owner,
7767                                      u64 offset, u64 ram_bytes,
7768                                      struct btrfs_key *ins)
7769 {
7770         int ret;
7771
7772         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
7773
7774         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
7775                                          ins->offset, 0,
7776                                          root_objectid, owner, offset,
7777                                          ram_bytes, BTRFS_ADD_DELAYED_EXTENT,
7778                                          NULL);
7779         return ret;
7780 }
7781
7782 /*
7783  * this is used by the tree logging recovery code.  It records that
7784  * an extent has been allocated and makes sure to clear the free
7785  * space cache bits as well.
7786  */
7787 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
7788                                    struct btrfs_root *root,
7789                                    u64 root_objectid, u64 owner, u64 offset,
7790                                    struct btrfs_key *ins)
7791 {
7792         int ret;
7793         struct btrfs_block_group_cache *block_group;
7794
7795         /*
7796          * Mixed block groups will exclude before processing the log so we only
7797          * need to do the exclude dance if this fs isn't mixed.
7798          */
7799         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
7800                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
7801                 if (ret)
7802                         return ret;
7803         }
7804
7805         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
7806         if (!block_group)
7807                 return -EINVAL;
7808
7809         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
7810                                           RESERVE_ALLOC_NO_ACCOUNT, 0);
7811         BUG_ON(ret); /* logic error */
7812         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
7813                                          0, owner, offset, ins, 1);
7814         btrfs_put_block_group(block_group);
7815         return ret;
7816 }
7817
7818 static struct extent_buffer *
7819 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
7820                       u64 bytenr, int level)
7821 {
7822         struct extent_buffer *buf;
7823
7824         buf = btrfs_find_create_tree_block(root, bytenr);
7825         if (!buf)
7826                 return ERR_PTR(-ENOMEM);
7827         btrfs_set_header_generation(buf, trans->transid);
7828         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
7829         btrfs_tree_lock(buf);
7830         clean_tree_block(trans, root->fs_info, buf);
7831         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
7832
7833         btrfs_set_lock_blocking(buf);
7834         btrfs_set_buffer_uptodate(buf);
7835
7836         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
7837                 buf->log_index = root->log_transid % 2;
7838                 /*
7839                  * we allow two log transactions at a time, so use different
7840                  * EXTENT bits to differentiate dirty pages.
7841                  */
7842                 if (buf->log_index == 0)
7843                         set_extent_dirty(&root->dirty_log_pages, buf->start,
7844                                         buf->start + buf->len - 1, GFP_NOFS);
7845                 else
7846                         set_extent_new(&root->dirty_log_pages, buf->start,
7847                                         buf->start + buf->len - 1, GFP_NOFS);
7848         } else {
7849                 buf->log_index = -1;
7850                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
7851                          buf->start + buf->len - 1, GFP_NOFS);
7852         }
7853         trans->blocks_used++;
7854         /* this returns a buffer locked for blocking */
7855         return buf;
7856 }
7857
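     /*
      * Pick a block reserve to pay for a new tree block.  We first try the
      * rsv from get_block_rsv(), refreshing the global reserve accounting
      * once if that is what we were given; then we fall back to a direct
      * no-flush metadata reservation, and finally to stealing from the
      * global reserve when the space info matches.
      */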
7858 static struct btrfs_block_rsv *
7859 use_block_rsv(struct btrfs_trans_handle *trans,
7860               struct btrfs_root *root, u32 blocksize)
7861 {
7862         struct btrfs_block_rsv *block_rsv;
7863         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
7864         int ret;
7865         bool global_updated = false;
7866
7867         block_rsv = get_block_rsv(trans, root);
7868
7869         if (unlikely(block_rsv->size == 0))
7870                 goto try_reserve;
7871 again:
7872         ret = block_rsv_use_bytes(block_rsv, blocksize);
7873         if (!ret)
7874                 return block_rsv;
7875
7876         if (block_rsv->failfast)
7877                 return ERR_PTR(ret);
7878
7879         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
7880                 global_updated = true;
7881                 update_global_block_rsv(root->fs_info);
7882                 goto again;
7883         }
7884
7885         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7886                 static DEFINE_RATELIMIT_STATE(_rs,
7887                                 DEFAULT_RATELIMIT_INTERVAL * 10,
7888                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
7889                 if (__ratelimit(&_rs))
7890                         WARN(1, KERN_DEBUG
7891                                 "BTRFS: block rsv returned %d\n", ret);
7892         }
7893 try_reserve:
7894         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
7895                                      BTRFS_RESERVE_NO_FLUSH);
7896         if (!ret)
7897                 return block_rsv;
7898         /*
7899          * If we couldn't reserve metadata bytes try to use some from
7900          * the global reserve if this rsv's space info is the same as the
7901          * global reservation's.
7902          */
7903         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
7904             block_rsv->space_info == global_rsv->space_info) {
7905                 ret = block_rsv_use_bytes(global_rsv, blocksize);
7906                 if (!ret)
7907                         return global_rsv;
7908         }
7909         return ERR_PTR(ret);
7910 }
7911
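/*
 * Return @blocksize bytes to @block_rsv after a failed or abandoned
 * allocation, then release anything the rsv holds beyond its target size.
 */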
7912 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
7913                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
7914 {
7915         block_rsv_add_bytes(block_rsv, blocksize, 0);
7916         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
7917 }
7918
7919 /*
7920  * finds a free extent and does all the dirty work required for allocation
7921  * returns the tree buffer or an ERR_PTR on error.
7922  */
7923 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7924                                         struct btrfs_root *root,
7925                                         u64 parent, u64 root_objectid,
7926                                         struct btrfs_disk_key *key, int level,
7927                                         u64 hint, u64 empty_size)
7928 {
7929         struct btrfs_key ins;
7930         struct btrfs_block_rsv *block_rsv;
7931         struct extent_buffer *buf;
7932         struct btrfs_delayed_extent_op *extent_op;
7933         u64 flags = 0;
7934         int ret;
7935         u32 blocksize = root->nodesize;
7936         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7937                                                  SKINNY_METADATA);
7938
7939         if (btrfs_test_is_dummy_root(root)) {
7940                 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
7941                                             level);
7942                 if (!IS_ERR(buf))
7943                         root->alloc_bytenr += blocksize;
7944                 return buf;
7945         }
7946
7947         block_rsv = use_block_rsv(trans, root, blocksize);
7948         if (IS_ERR(block_rsv))
7949                 return ERR_CAST(block_rsv);
7950
7951         ret = btrfs_reserve_extent(root, blocksize, blocksize,
7952                                    empty_size, hint, &ins, 0, 0);
7953         if (ret)
7954                 goto out_unuse;
7955
7956         buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
7957         if (IS_ERR(buf)) {
7958                 ret = PTR_ERR(buf);
7959                 goto out_free_reserved;
7960         }
7961
7962         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
7963                 if (parent == 0)
7964                         parent = ins.objectid;
7965                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7966         } else
7967                 BUG_ON(parent > 0);
7968
7969         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
7970                 extent_op = btrfs_alloc_delayed_extent_op();
7971                 if (!extent_op) {
7972                         ret = -ENOMEM;
7973                         goto out_free_buf;
7974                 }
7975                 if (key)
7976                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
7977                 else
7978                         memset(&extent_op->key, 0, sizeof(extent_op->key));
7979                 extent_op->flags_to_set = flags;
7980                 if (skinny_metadata)
7981                         extent_op->update_key = 0;
7982                 else
7983                         extent_op->update_key = 1;
7984                 extent_op->update_flags = 1;
7985                 extent_op->is_data = 0;
7986                 extent_op->level = level;
7987
7988                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7989                                                  ins.objectid, ins.offset,
7990                                                  parent, root_objectid, level,
7991                                                  BTRFS_ADD_DELAYED_EXTENT,
7992                                                  extent_op);
7993                 if (ret)
7994                         goto out_free_delayed;
7995         }
7996         return buf;
7997
7998 out_free_delayed:
7999         btrfs_free_delayed_extent_op(extent_op);
8000 out_free_buf:
8001         free_extent_buffer(buf);
8002 out_free_reserved:
8003         btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
8004 out_unuse:
8005         unuse_block_rsv(root->fs_info, block_rsv, blocksize);
8006         return ERR_PTR(ret);
8007 }
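
/*
 * A minimal sketch of the expected calling pattern for the allocator
 * above (transaction, key and hint setup are assumed to exist):
 *
 *	eb = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
 *				    &disk_key, level, hint, 0);
 *	if (IS_ERR(eb))
 *		return PTR_ERR(eb);
 *	... fill in the new block; it comes back locked and dirty ...
 *	btrfs_tree_unlock(eb);
 *	free_extent_buffer(eb);
 */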
8008
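/*
 * State carried through a tree walk while dropping a snapshot: per-level
 * reference counts and flags, the key to restart UPDATE_BACKREF from,
 * the current stage and level, locking behaviour and readahead
 * bookkeeping.
 */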
8009 struct walk_control {
8010         u64 refs[BTRFS_MAX_LEVEL];
8011         u64 flags[BTRFS_MAX_LEVEL];
8012         struct btrfs_key update_progress;
8013         int stage;
8014         int level;
8015         int shared_level;
8016         int update_ref;
8017         int keep_locks;
8018         int reada_slot;
8019         int reada_count;
8020         int for_reloc;
8021 };
8022
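/*
 * A walk runs in two stages: DROP_REFERENCE drops the references we hold
 * while walking down; when a shared subtree whose backrefs still need
 * updating is found, the walk switches to UPDATE_BACKREF for that
 * subtree and then falls back to DROP_REFERENCE.
 */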
8023 #define DROP_REFERENCE  1
8024 #define UPDATE_BACKREF  2
8025
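/*
 * Read ahead the tree blocks a coming walk down is likely to visit.  The
 * readahead window (wc->reada_count) shrinks when the walk re-enters
 * while still inside the previous window and grows otherwise, and blocks
 * the walk would obviously skip are not prefetched.
 */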
8026 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
8027                                      struct btrfs_root *root,
8028                                      struct walk_control *wc,
8029                                      struct btrfs_path *path)
8030 {
8031         u64 bytenr;
8032         u64 generation;
8033         u64 refs;
8034         u64 flags;
8035         u32 nritems;
8036         u32 blocksize;
8037         struct btrfs_key key;
8038         struct extent_buffer *eb;
8039         int ret;
8040         int slot;
8041         int nread = 0;
8042
8043         if (path->slots[wc->level] < wc->reada_slot) {
8044                 wc->reada_count = wc->reada_count * 2 / 3;
8045                 wc->reada_count = max(wc->reada_count, 2);
8046         } else {
8047                 wc->reada_count = wc->reada_count * 3 / 2;
8048                 wc->reada_count = min_t(int, wc->reada_count,
8049                                         BTRFS_NODEPTRS_PER_BLOCK(root));
8050         }
8051
8052         eb = path->nodes[wc->level];
8053         nritems = btrfs_header_nritems(eb);
8054         blocksize = root->nodesize;
8055
8056         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
8057                 if (nread >= wc->reada_count)
8058                         break;
8059
8060                 cond_resched();
8061                 bytenr = btrfs_node_blockptr(eb, slot);
8062                 generation = btrfs_node_ptr_generation(eb, slot);
8063
8064                 if (slot == path->slots[wc->level])
8065                         goto reada;
8066
8067                 if (wc->stage == UPDATE_BACKREF &&
8068                     generation <= root->root_key.offset)
8069                         continue;
8070
8071                 /* We don't lock the tree block, it's OK to be racy here */
8072                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
8073                                                wc->level - 1, 1, &refs,
8074                                                &flags);
8075                 /* We don't care about errors in readahead. */
8076                 if (ret < 0)
8077                         continue;
8078                 BUG_ON(refs == 0);
8079
8080                 if (wc->stage == DROP_REFERENCE) {
8081                         if (refs == 1)
8082                                 goto reada;
8083
8084                         if (wc->level == 1 &&
8085                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8086                                 continue;
8087                         if (!wc->update_ref ||
8088                             generation <= root->root_key.offset)
8089                                 continue;
8090                         btrfs_node_key_to_cpu(eb, &key, slot);
8091                         ret = btrfs_comp_cpu_keys(&key,
8092                                                   &wc->update_progress);
8093                         if (ret < 0)
8094                                 continue;
8095                 } else {
8096                         if (wc->level == 1 &&
8097                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8098                                 continue;
8099                 }
8100 reada:
8101                 readahead_tree_block(root, bytenr);
8102                 nread++;
8103         }
8104         wc->reada_slot = slot;
8105 }
8106
8107 /*
8108  * TODO: Modify the related functions to add the node/leaf to
8109  * dirty_extent_root for later qgroup accounting.
8110  *
8111  * Currently, this function does nothing.
8112  */
8113 static int account_leaf_items(struct btrfs_trans_handle *trans,
8114                               struct btrfs_root *root,
8115                               struct extent_buffer *eb)
8116 {
8117         int nr = btrfs_header_nritems(eb);
8118         int i, extent_type;
8119         struct btrfs_key key;
8120         struct btrfs_file_extent_item *fi;
8121         u64 bytenr, num_bytes;
8122
8123         for (i = 0; i < nr; i++) {
8124                 btrfs_item_key_to_cpu(eb, &key, i);
8125
8126                 if (key.type != BTRFS_EXTENT_DATA_KEY)
8127                         continue;
8128
8129                 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
8130                 /* filter out non-qgroup-accountable extents */
8131                 extent_type = btrfs_file_extent_type(eb, fi);
8132
8133                 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
8134                         continue;
8135
8136                 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
8137                 if (!bytenr)
8138                         continue;
8139
8140                 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
8141         }
8142         return 0;
8143 }
8144
8145 /*
8146  * Walk up the tree from the bottom, freeing leaves and any interior
8147  * nodes which have had all slots visited. If a node (leaf or
8148  * interior) is freed, the node above it will have it's slot
8149  * interior) is freed, the node above it will have its slot
8150  *
8151  * At the end of this function, we should have a path which has all
8152  * slots incremented to the next position for a search. If we need to
8153  * read a new node it will be NULL and the node above it will have the
8154  * correct slot selected for a later read.
8155  *
8156  * If we increment the root node's slot counter past the number of
8157  * elements, 1 is returned to signal completion of the search.
8158  */
8159 static int adjust_slots_upwards(struct btrfs_root *root,
8160                                 struct btrfs_path *path, int root_level)
8161 {
8162         int level = 0;
8163         int nr, slot;
8164         struct extent_buffer *eb;
8165
8166         if (root_level == 0)
8167                 return 1;
8168
8169         while (level <= root_level) {
8170                 eb = path->nodes[level];
8171                 nr = btrfs_header_nritems(eb);
8172                 path->slots[level]++;
8173                 slot = path->slots[level];
8174                 if (slot >= nr || level == 0) {
8175                         /*
8176                          * Don't free the root -  we will detect this
8177                          * condition after our loop and return a
8178                          * positive value for caller to stop walking the tree.
8179                          */
8180                         if (level != root_level) {
8181                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8182                                 path->locks[level] = 0;
8183
8184                                 free_extent_buffer(eb);
8185                                 path->nodes[level] = NULL;
8186                                 path->slots[level] = 0;
8187                         }
8188                 } else {
8189                         /*
8190                          * We have a valid slot to walk back down
8191                          * from. Stop here so caller can process these
8192                          * new nodes.
8193                          */
8194                         break;
8195                 }
8196
8197                 level++;
8198         }
8199
8200         eb = path->nodes[root_level];
8201         if (path->slots[root_level] >= btrfs_header_nritems(eb))
8202                 return 1;
8203
8204         return 0;
8205 }
8206
8207 /*
8208  * root_eb is the subtree root and is locked before this function is called.
8209  * TODO: Modify this function to mark all (including completely shared nodes)
8210  * in dirty_extent_root to allow them to get accounted in qgroup.
8211  */
8212 static int account_shared_subtree(struct btrfs_trans_handle *trans,
8213                                   struct btrfs_root *root,
8214                                   struct extent_buffer *root_eb,
8215                                   u64 root_gen,
8216                                   int root_level)
8217 {
8218         int ret = 0;
8219         int level;
8220         struct extent_buffer *eb = root_eb;
8221         struct btrfs_path *path = NULL;
8222
8223         BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL);
8224         BUG_ON(root_eb == NULL);
8225
8226         if (!root->fs_info->quota_enabled)
8227                 return 0;
8228
8229         if (!extent_buffer_uptodate(root_eb)) {
8230                 ret = btrfs_read_buffer(root_eb, root_gen);
8231                 if (ret)
8232                         goto out;
8233         }
8234
8235         if (root_level == 0) {
8236                 ret = account_leaf_items(trans, root, root_eb);
8237                 goto out;
8238         }
8239
8240         path = btrfs_alloc_path();
8241         if (!path)
8242                 return -ENOMEM;
8243
8244         /*
8245          * Walk down the tree.  Missing extent blocks are filled in as
8246          * we go. Metadata is accounted every time we read a new
8247          * extent block.
8248          *
8249          * When we reach a leaf, we account for file extent items in it,
8250          * walk back up the tree (adjusting slot pointers as we go)
8251          * and restart the search process.
8252          */
8253         extent_buffer_get(root_eb); /* For path */
8254         path->nodes[root_level] = root_eb;
8255         path->slots[root_level] = 0;
8256         path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
8257 walk_down:
8258         level = root_level;
8259         while (level >= 0) {
8260                 if (path->nodes[level] == NULL) {
8261                         int parent_slot;
8262                         u64 child_gen;
8263                         u64 child_bytenr;
8264
8265                         /* We need to get child blockptr/gen from
8266                          * parent before we can read it. */
8267                         eb = path->nodes[level + 1];
8268                         parent_slot = path->slots[level + 1];
8269                         child_bytenr = btrfs_node_blockptr(eb, parent_slot);
8270                         child_gen = btrfs_node_ptr_generation(eb, parent_slot);
8271
8272                         eb = read_tree_block(root, child_bytenr, child_gen);
8273                         if (IS_ERR(eb)) {
8274                                 ret = PTR_ERR(eb);
8275                                 goto out;
8276                         } else if (!extent_buffer_uptodate(eb)) {
8277                                 free_extent_buffer(eb);
8278                                 ret = -EIO;
8279                                 goto out;
8280                         }
8281
8282                         path->nodes[level] = eb;
8283                         path->slots[level] = 0;
8284
8285                         btrfs_tree_read_lock(eb);
8286                         btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
8287                         path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
8288                 }
8289
8290                 if (level == 0) {
8291                         ret = account_leaf_items(trans, root, path->nodes[level]);
8292                         if (ret)
8293                                 goto out;
8294
8295                         /* Nonzero return here means we completed our search */
8296                         ret = adjust_slots_upwards(root, path, root_level);
8297                         if (ret)
8298                                 break;
8299
8300                         /* Restart search with new slots */
8301                         goto walk_down;
8302                 }
8303
8304                 level--;
8305         }
8306
8307         ret = 0;
8308 out:
8309         btrfs_free_path(path);
8310
8311         return ret;
8312 }
8313
8314 /*
8315  * helper to process tree block while walking down the tree.
8316  *
8317  * when wc->stage == UPDATE_BACKREF, this function updates
8318  * back refs for pointers in the block.
8319  *
8320  * NOTE: return value 1 means we should stop walking down.
8321  */
8322 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
8323                                    struct btrfs_root *root,
8324                                    struct btrfs_path *path,
8325                                    struct walk_control *wc, int lookup_info)
8326 {
8327         int level = wc->level;
8328         struct extent_buffer *eb = path->nodes[level];
8329         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8330         int ret;
8331
8332         if (wc->stage == UPDATE_BACKREF &&
8333             btrfs_header_owner(eb) != root->root_key.objectid)
8334                 return 1;
8335
8336         /*
8337          * when the reference count of a tree block is 1, it won't increase
8338          * again. once the full backref flag is set, we never clear it.
8339          */
8340         if (lookup_info &&
8341             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
8342              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
8343                 BUG_ON(!path->locks[level]);
8344                 ret = btrfs_lookup_extent_info(trans, root,
8345                                                eb->start, level, 1,
8346                                                &wc->refs[level],
8347                                                &wc->flags[level]);
8348                 BUG_ON(ret == -ENOMEM);
8349                 if (ret)
8350                         return ret;
8351                 BUG_ON(wc->refs[level] == 0);
8352         }
8353
8354         if (wc->stage == DROP_REFERENCE) {
8355                 if (wc->refs[level] > 1)
8356                         return 1;
8357
8358                 if (path->locks[level] && !wc->keep_locks) {
8359                         btrfs_tree_unlock_rw(eb, path->locks[level]);
8360                         path->locks[level] = 0;
8361                 }
8362                 return 0;
8363         }
8364
8365         /* wc->stage == UPDATE_BACKREF */
8366         if (!(wc->flags[level] & flag)) {
8367                 BUG_ON(!path->locks[level]);
8368                 ret = btrfs_inc_ref(trans, root, eb, 1);
8369                 BUG_ON(ret); /* -ENOMEM */
8370                 ret = btrfs_dec_ref(trans, root, eb, 0);
8371                 BUG_ON(ret); /* -ENOMEM */
8372                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
8373                                                   eb->len, flag,
8374                                                   btrfs_header_level(eb), 0);
8375                 BUG_ON(ret); /* -ENOMEM */
8376                 wc->flags[level] |= flag;
8377         }
8378
8379         /*
8380          * the block is shared by multiple trees, so it's not good to
8381          * keep the tree lock
8382          */
8383         if (path->locks[level] && level > 0) {
8384                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8385                 path->locks[level] = 0;
8386         }
8387         return 0;
8388 }
8389
8390 /*
8391  * helper to process tree block pointer.
8392  *
8393  * when wc->stage == DROP_REFERENCE, this function checks
8394  * reference count of the block pointed to. if the block
8395  * is shared and we need update back refs for the subtree
8396  * rooted at the block, this function changes wc->stage to
8397  * UPDATE_BACKREF. if the block is shared and there is no
8398  * need to update back, this function drops the reference
8399  * to the block.
8400  *
8401  * NOTE: return value 1 means we should stop walking down.
8402  */
8403 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
8404                                  struct btrfs_root *root,
8405                                  struct btrfs_path *path,
8406                                  struct walk_control *wc, int *lookup_info)
8407 {
8408         u64 bytenr;
8409         u64 generation;
8410         u64 parent;
8411         u32 blocksize;
8412         struct btrfs_key key;
8413         struct extent_buffer *next;
8414         int level = wc->level;
8415         int reada = 0;
8416         int ret = 0;
8417         bool need_account = false;
8418
8419         generation = btrfs_node_ptr_generation(path->nodes[level],
8420                                                path->slots[level]);
8421         /*
8422          * if the lower level block was created before the snapshot
8423          * was created, we know there is no need to update back refs
8424          * for the subtree
8425          */
8426         if (wc->stage == UPDATE_BACKREF &&
8427             generation <= root->root_key.offset) {
8428                 *lookup_info = 1;
8429                 return 1;
8430         }
8431
8432         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
8433         blocksize = root->nodesize;
8434
8435         next = btrfs_find_tree_block(root->fs_info, bytenr);
8436         if (!next) {
8437                 next = btrfs_find_create_tree_block(root, bytenr);
8438                 if (!next)
8439                         return -ENOMEM;
8440                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
8441                                                level - 1);
8442                 reada = 1;
8443         }
8444         btrfs_tree_lock(next);
8445         btrfs_set_lock_blocking(next);
8446
8447         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
8448                                        &wc->refs[level - 1],
8449                                        &wc->flags[level - 1]);
8450         if (ret < 0) {
8451                 btrfs_tree_unlock(next);
8452                 return ret;
8453         }
8454
8455         if (unlikely(wc->refs[level - 1] == 0)) {
8456                 btrfs_err(root->fs_info, "Missing references.");
8457                 BUG();
8458         }
8459         *lookup_info = 0;
8460
8461         if (wc->stage == DROP_REFERENCE) {
8462                 if (wc->refs[level - 1] > 1) {
8463                         need_account = true;
8464                         if (level == 1 &&
8465                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8466                                 goto skip;
8467
8468                         if (!wc->update_ref ||
8469                             generation <= root->root_key.offset)
8470                                 goto skip;
8471
8472                         btrfs_node_key_to_cpu(path->nodes[level], &key,
8473                                               path->slots[level]);
8474                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
8475                         if (ret < 0)
8476                                 goto skip;
8477
8478                         wc->stage = UPDATE_BACKREF;
8479                         wc->shared_level = level - 1;
8480                 }
8481         } else {
8482                 if (level == 1 &&
8483                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8484                         goto skip;
8485         }
8486
8487         if (!btrfs_buffer_uptodate(next, generation, 0)) {
8488                 btrfs_tree_unlock(next);
8489                 free_extent_buffer(next);
8490                 next = NULL;
8491                 *lookup_info = 1;
8492         }
8493
8494         if (!next) {
8495                 if (reada && level == 1)
8496                         reada_walk_down(trans, root, wc, path);
8497                 next = read_tree_block(root, bytenr, generation);
8498                 if (IS_ERR(next)) {
8499                         return PTR_ERR(next);
8500                 } else if (!extent_buffer_uptodate(next)) {
8501                         free_extent_buffer(next);
8502                         return -EIO;
8503                 }
8504                 btrfs_tree_lock(next);
8505                 btrfs_set_lock_blocking(next);
8506         }
8507
8508         level--;
8509         BUG_ON(level != btrfs_header_level(next));
8510         path->nodes[level] = next;
8511         path->slots[level] = 0;
8512         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8513         wc->level = level;
8514         if (wc->level == 1)
8515                 wc->reada_slot = 0;
8516         return 0;
8517 skip:
8518         wc->refs[level - 1] = 0;
8519         wc->flags[level - 1] = 0;
8520         if (wc->stage == DROP_REFERENCE) {
8521                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
8522                         parent = path->nodes[level]->start;
8523                 } else {
8524                         BUG_ON(root->root_key.objectid !=
8525                                btrfs_header_owner(path->nodes[level]));
8526                         parent = 0;
8527                 }
8528
8529                 if (need_account) {
8530                         ret = account_shared_subtree(trans, root, next,
8531                                                      generation, level - 1);
8532                         if (ret) {
8533                                 btrfs_err_rl(root->fs_info,
8534                                         "Error "
8535                                         "%d accounting shared subtree. Quota "
8536                                         "is out of sync, rescan required.",
8537                                         ret);
8538                         }
8539                 }
8540                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
8541                                 root->root_key.objectid, level - 1, 0);
8542                 BUG_ON(ret); /* -ENOMEM */
8543         }
8544         btrfs_tree_unlock(next);
8545         free_extent_buffer(next);
8546         *lookup_info = 1;
8547         return 1;
8548 }
8549
8550 /*
8551  * helper to process tree block while walking up the tree.
8552  *
8553  * when wc->stage == DROP_REFERENCE, this function drops
8554  * reference count on the block.
8555  *
8556  * when wc->stage == UPDATE_BACKREF, this function changes
8557  * wc->stage back to DROP_REFERENCE if we changed wc->stage
8558  * to UPDATE_BACKREF previously while processing the block.
8559  *
8560  * NOTE: return value 1 means we should stop walking up.
8561  */
8562 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
8563                                  struct btrfs_root *root,
8564                                  struct btrfs_path *path,
8565                                  struct walk_control *wc)
8566 {
8567         int ret;
8568         int level = wc->level;
8569         struct extent_buffer *eb = path->nodes[level];
8570         u64 parent = 0;
8571
8572         if (wc->stage == UPDATE_BACKREF) {
8573                 BUG_ON(wc->shared_level < level);
8574                 if (level < wc->shared_level)
8575                         goto out;
8576
8577                 ret = find_next_key(path, level + 1, &wc->update_progress);
8578                 if (ret > 0)
8579                         wc->update_ref = 0;
8580
8581                 wc->stage = DROP_REFERENCE;
8582                 wc->shared_level = -1;
8583                 path->slots[level] = 0;
8584
8585                 /*
8586                  * check reference count again if the block isn't locked.
8587                  * we should start walking down the tree again if reference
8588                  * count is one.
8589                  */
8590                 if (!path->locks[level]) {
8591                         BUG_ON(level == 0);
8592                         btrfs_tree_lock(eb);
8593                         btrfs_set_lock_blocking(eb);
8594                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8595
8596                         ret = btrfs_lookup_extent_info(trans, root,
8597                                                        eb->start, level, 1,
8598                                                        &wc->refs[level],
8599                                                        &wc->flags[level]);
8600                         if (ret < 0) {
8601                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8602                                 path->locks[level] = 0;
8603                                 return ret;
8604                         }
8605                         BUG_ON(wc->refs[level] == 0);
8606                         if (wc->refs[level] == 1) {
8607                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8608                                 path->locks[level] = 0;
8609                                 return 1;
8610                         }
8611                 }
8612         }
8613
8614         /* wc->stage == DROP_REFERENCE */
8615         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
8616
8617         if (wc->refs[level] == 1) {
8618                 if (level == 0) {
8619                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8620                                 ret = btrfs_dec_ref(trans, root, eb, 1);
8621                         else
8622                                 ret = btrfs_dec_ref(trans, root, eb, 0);
8623                         BUG_ON(ret); /* -ENOMEM */
8624                         ret = account_leaf_items(trans, root, eb);
8625                         if (ret) {
8626                                 btrfs_err_rl(root->fs_info,
8627                                         "error "
8628                                         "%d accounting leaf items. Quota "
8629                                         "is out of sync, rescan required.",
8630                                         ret);
8631                         }
8632                 }
8633                 /* make block locked assertion in clean_tree_block happy */
8634                 if (!path->locks[level] &&
8635                     btrfs_header_generation(eb) == trans->transid) {
8636                         btrfs_tree_lock(eb);
8637                         btrfs_set_lock_blocking(eb);
8638                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8639                 }
8640                 clean_tree_block(trans, root->fs_info, eb);
8641         }
8642
8643         if (eb == root->node) {
8644                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8645                         parent = eb->start;
8646                 else
8647                         BUG_ON(root->root_key.objectid !=
8648                                btrfs_header_owner(eb));
8649         } else {
8650                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8651                         parent = path->nodes[level + 1]->start;
8652                 else
8653                         BUG_ON(root->root_key.objectid !=
8654                                btrfs_header_owner(path->nodes[level + 1]));
8655         }
8656
8657         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
8658 out:
8659         wc->refs[level] = 0;
8660         wc->flags[level] = 0;
8661         return 0;
8662 }
8663
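/*
 * walk down the tree, calling walk_down_proc() on each block we visit
 * and do_walk_down() on each child pointer, until we hit a leaf, run
 * out of slots, or one of the helpers tells us to stop.
 */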
8664 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
8665                                    struct btrfs_root *root,
8666                                    struct btrfs_path *path,
8667                                    struct walk_control *wc)
8668 {
8669         int level = wc->level;
8670         int lookup_info = 1;
8671         int ret;
8672
8673         while (level >= 0) {
8674                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
8675                 if (ret > 0)
8676                         break;
8677
8678                 if (level == 0)
8679                         break;
8680
8681                 if (path->slots[level] >=
8682                     btrfs_header_nritems(path->nodes[level]))
8683                         break;
8684
8685                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
8686                 if (ret > 0) {
8687                         path->slots[level]++;
8688                         continue;
8689                 } else if (ret < 0)
8690                         return ret;
8691                 level = wc->level;
8692         }
8693         return 0;
8694 }
8695
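/*
 * walk back up the tree releasing the blocks we are done with.  If some
 * level still has a sibling slot left we stop there and return 0 so the
 * caller can walk back down; returns 1 once everything up to @max_level
 * has been processed.
 */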
8696 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
8697                                  struct btrfs_root *root,
8698                                  struct btrfs_path *path,
8699                                  struct walk_control *wc, int max_level)
8700 {
8701         int level = wc->level;
8702         int ret;
8703
8704         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
8705         while (level < max_level && path->nodes[level]) {
8706                 wc->level = level;
8707                 if (path->slots[level] + 1 <
8708                     btrfs_header_nritems(path->nodes[level])) {
8709                         path->slots[level]++;
8710                         return 0;
8711                 } else {
8712                         ret = walk_up_proc(trans, root, path, wc);
8713                         if (ret > 0)
8714                                 return 0;
8715
8716                         if (path->locks[level]) {
8717                                 btrfs_tree_unlock_rw(path->nodes[level],
8718                                                      path->locks[level]);
8719                                 path->locks[level] = 0;
8720                         }
8721                         free_extent_buffer(path->nodes[level]);
8722                         path->nodes[level] = NULL;
8723                         level++;
8724                 }
8725         }
8726         return 1;
8727 }
8728
8729 /*
8730  * drop a subvolume tree.
8731  *
8732  * this function traverses the tree freeing any blocks that are only
8733  * referenced by the tree.
8734  *
8735  * when a shared tree block is found, this function decreases its
8736  * reference count by one. if update_ref is true, this function
8737  * also makes sure backrefs for the shared block and all lower level
8738  * blocks are properly updated.
8739  *
8740  * If called with for_reloc == 0, may exit early with -EAGAIN
8741  */
8742 int btrfs_drop_snapshot(struct btrfs_root *root,
8743                          struct btrfs_block_rsv *block_rsv, int update_ref,
8744                          int for_reloc)
8745 {
8746         struct btrfs_path *path;
8747         struct btrfs_trans_handle *trans;
8748         struct btrfs_root *tree_root = root->fs_info->tree_root;
8749         struct btrfs_root_item *root_item = &root->root_item;
8750         struct walk_control *wc;
8751         struct btrfs_key key;
8752         int err = 0;
8753         int ret;
8754         int level;
8755         bool root_dropped = false;
8756
8757         btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
8758
8759         path = btrfs_alloc_path();
8760         if (!path) {
8761                 err = -ENOMEM;
8762                 goto out;
8763         }
8764
8765         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8766         if (!wc) {
8767                 btrfs_free_path(path);
8768                 err = -ENOMEM;
8769                 goto out;
8770         }
8771
8772         trans = btrfs_start_transaction(tree_root, 0);
8773         if (IS_ERR(trans)) {
8774                 err = PTR_ERR(trans);
8775                 goto out_free;
8776         }
8777
8778         if (block_rsv)
8779                 trans->block_rsv = block_rsv;
8780
8781         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
8782                 level = btrfs_header_level(root->node);
8783                 path->nodes[level] = btrfs_lock_root_node(root);
8784                 btrfs_set_lock_blocking(path->nodes[level]);
8785                 path->slots[level] = 0;
8786                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8787                 memset(&wc->update_progress, 0,
8788                        sizeof(wc->update_progress));
8789         } else {
8790                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
8791                 memcpy(&wc->update_progress, &key,
8792                        sizeof(wc->update_progress));
8793
8794                 level = root_item->drop_level;
8795                 BUG_ON(level == 0);
8796                 path->lowest_level = level;
8797                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8798                 path->lowest_level = 0;
8799                 if (ret < 0) {
8800                         err = ret;
8801                         goto out_end_trans;
8802                 }
8803                 WARN_ON(ret > 0);
8804
8805                 /*
8806                  * unlock our path, this is safe because only this
8807                  * function is allowed to delete this snapshot
8808                  */
8809                 btrfs_unlock_up_safe(path, 0);
8810
8811                 level = btrfs_header_level(root->node);
8812                 while (1) {
8813                         btrfs_tree_lock(path->nodes[level]);
8814                         btrfs_set_lock_blocking(path->nodes[level]);
8815                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8816
8817                         ret = btrfs_lookup_extent_info(trans, root,
8818                                                 path->nodes[level]->start,
8819                                                 level, 1, &wc->refs[level],
8820                                                 &wc->flags[level]);
8821                         if (ret < 0) {
8822                                 err = ret;
8823                                 goto out_end_trans;
8824                         }
8825                         BUG_ON(wc->refs[level] == 0);
8826
8827                         if (level == root_item->drop_level)
8828                                 break;
8829
8830                         btrfs_tree_unlock(path->nodes[level]);
8831                         path->locks[level] = 0;
8832                         WARN_ON(wc->refs[level] != 1);
8833                         level--;
8834                 }
8835         }
8836
8837         wc->level = level;
8838         wc->shared_level = -1;
8839         wc->stage = DROP_REFERENCE;
8840         wc->update_ref = update_ref;
8841         wc->keep_locks = 0;
8842         wc->for_reloc = for_reloc;
8843         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8844
8845         while (1) {
8847                 ret = walk_down_tree(trans, root, path, wc);
8848                 if (ret < 0) {
8849                         err = ret;
8850                         break;
8851                 }
8852
8853                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
8854                 if (ret < 0) {
8855                         err = ret;
8856                         break;
8857                 }
8858
8859                 if (ret > 0) {
8860                         BUG_ON(wc->stage != DROP_REFERENCE);
8861                         break;
8862                 }
8863
8864                 if (wc->stage == DROP_REFERENCE) {
8865                         level = wc->level;
8866                         btrfs_node_key(path->nodes[level],
8867                                        &root_item->drop_progress,
8868                                        path->slots[level]);
8869                         root_item->drop_level = level;
8870                 }
8871
8872                 BUG_ON(wc->level == 0);
8873                 if (btrfs_should_end_transaction(trans, tree_root) ||
8874                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
8875                         ret = btrfs_update_root(trans, tree_root,
8876                                                 &root->root_key,
8877                                                 root_item);
8878                         if (ret) {
8879                                 btrfs_abort_transaction(trans, tree_root, ret);
8880                                 err = ret;
8881                                 goto out_end_trans;
8882                         }
8883
8884                         btrfs_end_transaction_throttle(trans, tree_root);
8885                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
8886                                 pr_debug("BTRFS: drop snapshot early exit\n");
8887                                 err = -EAGAIN;
8888                                 goto out_free;
8889                         }
8890
8891                         trans = btrfs_start_transaction(tree_root, 0);
8892                         if (IS_ERR(trans)) {
8893                                 err = PTR_ERR(trans);
8894                                 goto out_free;
8895                         }
8896                         if (block_rsv)
8897                                 trans->block_rsv = block_rsv;
8898                 }
8899         }
8900         btrfs_release_path(path);
8901         if (err)
8902                 goto out_end_trans;
8903
8904         ret = btrfs_del_root(trans, tree_root, &root->root_key);
8905         if (ret) {
8906                 btrfs_abort_transaction(trans, tree_root, ret);
8907                 goto out_end_trans;
8908         }
8909
8910         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
8911                 ret = btrfs_find_root(tree_root, &root->root_key, path,
8912                                       NULL, NULL);
8913                 if (ret < 0) {
8914                         btrfs_abort_transaction(trans, tree_root, ret);
8915                         err = ret;
8916                         goto out_end_trans;
8917                 } else if (ret > 0) {
8918                         /* if we fail to delete the orphan item this time
8919                          * around, it'll get picked up the next time.
8920                          *
8921                          * The most common failure here is just -ENOENT.
8922                          */
8923                         btrfs_del_orphan_item(trans, tree_root,
8924                                               root->root_key.objectid);
8925                 }
8926         }
8927
8928         if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
8929                 btrfs_add_dropped_root(trans, root);
8930         } else {
8931                 free_extent_buffer(root->node);
8932                 free_extent_buffer(root->commit_root);
8933                 btrfs_put_fs_root(root);
8934         }
8935         root_dropped = true;
8936 out_end_trans:
8937         btrfs_end_transaction_throttle(trans, tree_root);
8938 out_free:
8939         kfree(wc);
8940         btrfs_free_path(path);
8941 out:
8942         /*
8943          * So if we need to stop dropping the snapshot for whatever reason we
8944          * need to make sure to add it back to the dead root list so that we
8945          * keep trying to do the work later.  This also cleans up roots if we
8946          * don't have it in the radix (like when we recover after a power fail
8947          * or unmount) so we don't leak memory.
8948          */
8949         if (!for_reloc && root_dropped == false)
8950                 btrfs_add_dead_root(root);
8951         if (err && err != -EAGAIN)
8952                 btrfs_std_error(root->fs_info, err, NULL);
8953         return err;
8954 }
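
/*
 * A minimal sketch of how the cleaner is expected to drive the drop (the
 * root would normally come off fs_info->dead_roots):
 *
 *	ret = btrfs_drop_snapshot(root, NULL, 0, 0);
 *	if (ret == -EAGAIN)
 *		... we were asked to sleep; the root was re-queued on the
 *		    dead root list and the drop resumes later from
 *		    root_item->drop_progress ...
 */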
8955
8956 /*
8957  * drop subtree rooted at tree block 'node'.
8958  *
8959  * NOTE: this function will unlock and release tree block 'node'
8960  * only used by relocation code
8961  */
8962 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
8963                         struct btrfs_root *root,
8964                         struct extent_buffer *node,
8965                         struct extent_buffer *parent)
8966 {
8967         struct btrfs_path *path;
8968         struct walk_control *wc;
8969         int level;
8970         int parent_level;
8971         int ret = 0;
8972         int wret;
8973
8974         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
8975
8976         path = btrfs_alloc_path();
8977         if (!path)
8978                 return -ENOMEM;
8979
8980         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8981         if (!wc) {
8982                 btrfs_free_path(path);
8983                 return -ENOMEM;
8984         }
8985
8986         btrfs_assert_tree_locked(parent);
8987         parent_level = btrfs_header_level(parent);
8988         extent_buffer_get(parent);
8989         path->nodes[parent_level] = parent;
8990         path->slots[parent_level] = btrfs_header_nritems(parent);
8991
8992         btrfs_assert_tree_locked(node);
8993         level = btrfs_header_level(node);
8994         path->nodes[level] = node;
8995         path->slots[level] = 0;
8996         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8997
8998         wc->refs[parent_level] = 1;
8999         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
9000         wc->level = level;
9001         wc->shared_level = -1;
9002         wc->stage = DROP_REFERENCE;
9003         wc->update_ref = 0;
9004         wc->keep_locks = 1;
9005         wc->for_reloc = 1;
9006         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
9007
9008         while (1) {
9009                 wret = walk_down_tree(trans, root, path, wc);
9010                 if (wret < 0) {
9011                         ret = wret;
9012                         break;
9013                 }
9014
9015                 wret = walk_up_tree(trans, root, path, wc, parent_level);
9016                 if (wret < 0)
9017                         ret = wret;
9018                 if (wret != 0)
9019                         break;
9020         }
9021
9022         kfree(wc);
9023         btrfs_free_path(path);
9024         return ret;
9025 }
9026
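/*
 * Compute the profile a block group would be converted to by a balance:
 * a configured restripe target wins, otherwise mirroring/striping is
 * adjusted to what the current number of rw devices can support.
 */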
9027 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
9028 {
9029         u64 num_devices;
9030         u64 stripped;
9031
9032         /*
9033          * if restripe for this chunk_type is on, pick the target profile
9034          * and return; otherwise do the usual balance
9035          */
9036         stripped = get_restripe_target(root->fs_info, flags);
9037         if (stripped)
9038                 return extended_to_chunk(stripped);
9039
9040         num_devices = root->fs_info->fs_devices->rw_devices;
9041
9042         stripped = BTRFS_BLOCK_GROUP_RAID0 |
9043                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
9044                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
9045
9046         if (num_devices == 1) {
9047                 stripped |= BTRFS_BLOCK_GROUP_DUP;
9048                 stripped = flags & ~stripped;
9049
9050                 /* turn raid0 into single device chunks */
9051                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
9052                         return stripped;
9053
9054                 /* turn mirroring into duplication */
9055                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
9056                              BTRFS_BLOCK_GROUP_RAID10))
9057                         return stripped | BTRFS_BLOCK_GROUP_DUP;
9058         } else {
9059                 /* they already had raid on here, just return */
9060                 if (flags & stripped)
9061                         return flags;
9062
9063                 stripped |= BTRFS_BLOCK_GROUP_DUP;
9064                 stripped = flags & ~stripped;
9065
9066                 /* switch duplicated blocks with raid1 */
9067                 if (flags & BTRFS_BLOCK_GROUP_DUP)
9068                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
9069
9070                 /* this is drive concat, leave it alone */
9071         }
9072
9073         return flags;
9074 }
9075
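/*
 * Take a read-only reference on @cache.  The first reference only
 * succeeds if the space info can absorb the block group's unused bytes
 * as read-only, plus a small cushion for metadata/system space unless
 * @force; otherwise -ENOSPC is returned.
 */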
9076 static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
9077 {
9078         struct btrfs_space_info *sinfo = cache->space_info;
9079         u64 num_bytes;
9080         u64 min_allocable_bytes;
9081         int ret = -ENOSPC;
9082
9083         /*
9084          * We need some metadata space and system metadata space for
9085          * allocating chunks in some corner cases, so require some spare
9086          * allocatable space unless we are forced to set it readonly.
9087          */
9088         if ((sinfo->flags &
9089              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
9090             !force)
9091                 min_allocable_bytes = 1 * 1024 * 1024;
9092         else
9093                 min_allocable_bytes = 0;
9094
9095         spin_lock(&sinfo->lock);
9096         spin_lock(&cache->lock);
9097
9098         if (cache->ro) {
9099                 cache->ro++;
9100                 ret = 0;
9101                 goto out;
9102         }
9103
9104         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
9105                     cache->bytes_super - btrfs_block_group_used(&cache->item);
9106
9107         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
9108             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
9109             min_allocable_bytes <= sinfo->total_bytes) {
9110                 sinfo->bytes_readonly += num_bytes;
9111                 cache->ro++;
9112                 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
9113                 ret = 0;
9114         }
9115 out:
9116         spin_unlock(&cache->lock);
9117         spin_unlock(&sinfo->lock);
9118         return ret;
9119 }
9120
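/*
 * Make @cache read-only.  If the dirty block group write-out for the
 * running transaction has already started we wait for it to commit and
 * retry.  If changing raid levels, a chunk with the target profile is
 * force-allocated first; if the block group still can't go read-only a
 * chunk with the current profile is allocated to make room.
 */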
9121 int btrfs_inc_block_group_ro(struct btrfs_root *root,
9122                              struct btrfs_block_group_cache *cache)
9124 {
9125         struct btrfs_trans_handle *trans;
9126         u64 alloc_flags;
9127         int ret;
9128
9129 again:
9130         trans = btrfs_join_transaction(root);
9131         if (IS_ERR(trans))
9132                 return PTR_ERR(trans);
9133
9134         /*
9135          * we're not allowed to set block groups readonly after the dirty
9136          * block groups cache has started writing.  If it already started,
9137          * back off and let this transaction commit
9138          */
9139         mutex_lock(&root->fs_info->ro_block_group_mutex);
9140         if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
9141                 u64 transid = trans->transid;
9142
9143                 mutex_unlock(&root->fs_info->ro_block_group_mutex);
9144                 btrfs_end_transaction(trans, root);
9145
9146                 ret = btrfs_wait_for_commit(root, transid);
9147                 if (ret)
9148                         return ret;
9149                 goto again;
9150         }
9151
9152         /*
9153          * if we are changing raid levels, try to allocate a corresponding
9154          * block group with the new raid level.
9155          */
9156         alloc_flags = update_block_group_flags(root, cache->flags);
9157         if (alloc_flags != cache->flags) {
9158                 ret = do_chunk_alloc(trans, root, alloc_flags,
9159                                      CHUNK_ALLOC_FORCE);
9160                 /*
9161                  * ENOSPC is allowed here, we may have enough space
9162                  * already allocated at the new raid level to
9163                  * carry on
9164                  */
9165                 if (ret == -ENOSPC)
9166                         ret = 0;
9167                 if (ret < 0)
9168                         goto out;
9169         }
9170
9171         ret = inc_block_group_ro(cache, 0);
9172         if (!ret)
9173                 goto out;
9174         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
9175         ret = do_chunk_alloc(trans, root, alloc_flags,
9176                              CHUNK_ALLOC_FORCE);
9177         if (ret < 0)
9178                 goto out;
9179         ret = inc_block_group_ro(cache, 0);
9180 out:
9181         if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
9182                 alloc_flags = update_block_group_flags(root, cache->flags);
9183                 lock_chunks(root->fs_info->chunk_root);
9184                 check_system_chunk(trans, root, alloc_flags);
9185                 unlock_chunks(root->fs_info->chunk_root);
9186         }
9187         mutex_unlock(&root->fs_info->ro_block_group_mutex);
9188
9189         btrfs_end_transaction(trans, root);
9190         return ret;
9191 }
9192
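/*
 * Force-allocate a chunk for the given @type using its current
 * allocation profile.
 */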
9193 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
9194                             struct btrfs_root *root, u64 type)
9195 {
9196         u64 alloc_flags = get_alloc_profile(root, type);
9197         return do_chunk_alloc(trans, root, alloc_flags,
9198                               CHUNK_ALLOC_FORCE);
9199 }
9200
9201 /*
9202  * helper to account the unused space of all the readonly block groups in the
9203  * space_info. takes mirrors into account.
9204  */
9205 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
9206 {
9207         struct btrfs_block_group_cache *block_group;
9208         u64 free_bytes = 0;
9209         int factor;
9210
9211         /* It's df, we don't care if it's racy */
9212         if (list_empty(&sinfo->ro_bgs))
9213                 return 0;
9214
9215         spin_lock(&sinfo->lock);
9216         list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
9217                 spin_lock(&block_group->lock);
9218
9219                 if (!block_group->ro) {
9220                         spin_unlock(&block_group->lock);
9221                         continue;
9222                 }
9223
9224                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
9225                                           BTRFS_BLOCK_GROUP_RAID10 |
9226                                           BTRFS_BLOCK_GROUP_DUP))
9227                         factor = 2;
9228                 else
9229                         factor = 1;
9230
9231                 free_bytes += (block_group->key.offset -
9232                                btrfs_block_group_used(&block_group->item)) *
9233                                factor;
9234
9235                 spin_unlock(&block_group->lock);
9236         }
9237         spin_unlock(&sinfo->lock);
9238
9239         return free_bytes;
9240 }
9241
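/*
 * Drop one read-only reference on @cache.  Once the last reference is
 * gone the block group's unused bytes no longer count as read-only in
 * the space info and it is removed from the ro_bgs list.
 */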
9242 void btrfs_dec_block_group_ro(struct btrfs_root *root,
9243                               struct btrfs_block_group_cache *cache)
9244 {
9245         struct btrfs_space_info *sinfo = cache->space_info;
9246         u64 num_bytes;
9247
9248         BUG_ON(!cache->ro);
9249
9250         spin_lock(&sinfo->lock);
9251         spin_lock(&cache->lock);
9252         if (!--cache->ro) {
9253                 num_bytes = cache->key.offset - cache->reserved -
9254                             cache->pinned - cache->bytes_super -
9255                             btrfs_block_group_used(&cache->item);
9256                 sinfo->bytes_readonly -= num_bytes;
9257                 list_del_init(&cache->ro_list);
9258         }
9259         spin_unlock(&cache->lock);
9260         spin_unlock(&sinfo->lock);
9261 }
9262
9263 /*
9264  * checks to see if it's even possible to relocate this block group.
9265  *
9266  * @return - -1 if it's not a good idea to relocate this block group, 0 if
9267  * it's ok to go ahead and try.
9268  */
9269 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
9270 {
9271         struct btrfs_block_group_cache *block_group;
9272         struct btrfs_space_info *space_info;
9273         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
9274         struct btrfs_device *device;
9275         struct btrfs_trans_handle *trans;
9276         u64 min_free;
9277         u64 dev_min = 1;
9278         u64 dev_nr = 0;
9279         u64 target;
9280         int index;
9281         int full = 0;
9282         int ret = 0;
9283
9284         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
9285
9286         /* odd, couldn't find the block group, leave it alone */
9287         if (!block_group)
9288                 return -1;
9289
9290         min_free = btrfs_block_group_used(&block_group->item);
9291
9292         /* no bytes used, we're good */
9293         if (!min_free)
9294                 goto out;
9295
9296         space_info = block_group->space_info;
9297         spin_lock(&space_info->lock);
9298
9299         full = space_info->full;
9300
9301         /*
9302          * if this is the last block group we have in this space, we can't
9303          * relocate it unless we're able to allocate a new chunk below.
9304          *
9305          * Otherwise, we need to make sure we have room in the space to handle
9306          * all of the extents from this block group.  If we can, we're good.
9307          */
9308         if ((space_info->total_bytes != block_group->key.offset) &&
9309             (space_info->bytes_used + space_info->bytes_reserved +
9310              space_info->bytes_pinned + space_info->bytes_readonly +
9311              min_free < space_info->total_bytes)) {
9312                 spin_unlock(&space_info->lock);
9313                 goto out;
9314         }
9315         spin_unlock(&space_info->lock);
9316
9317         /*
9318          * ok we don't have enough space, but maybe we have free space on our
9319          * devices to allocate new chunks for relocation, so loop through our
9320          * alloc devices and guess if we have enough space.  if this block
9321          * group is going to be restriped, run checks against the target
9322          * profile instead of the current one.
9323          */
9324         ret = -1;
9325
9326         /*
9327          * index:
9328          *      0: raid10
9329          *      1: raid1
9330          *      2: dup
9331          *      3: raid0
9332          *      4: single
9333          */
9334         target = get_restripe_target(root->fs_info, block_group->flags);
9335         if (target) {
9336                 index = __get_raid_index(extended_to_chunk(target));
9337         } else {
9338                 /*
9339                  * this is just a balance, so if we were marked as full
9340                  * we know there is no space for a new chunk
9341                  */
9342                 if (full)
9343                         goto out;
9344
9345                 index = get_block_group_index(block_group);
9346         }
9347
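        /*
         * Adjust the per-device space requirement and minimum device
         * count for the RAID profile: mirrored/striped profiles need
         * more devices, DUP needs twice the space on one device, and
         * RAID0 spreads min_free evenly across all rw devices.
         */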
9348         if (index == BTRFS_RAID_RAID10) {
9349                 dev_min = 4;
9350                 /* Divide by 2 */
9351                 min_free >>= 1;
9352         } else if (index == BTRFS_RAID_RAID1) {
9353                 dev_min = 2;
9354         } else if (index == BTRFS_RAID_DUP) {
9355                 /* Multiply by 2 */
9356                 min_free <<= 1;
9357         } else if (index == BTRFS_RAID_RAID0) {
9358                 dev_min = fs_devices->rw_devices;
9359                 min_free = div64_u64(min_free, dev_min);
9360         }
9361
9362         /* We need to do this so that we can look at pending chunks */
9363         trans = btrfs_join_transaction(root);
9364         if (IS_ERR(trans)) {
9365                 ret = PTR_ERR(trans);
9366                 goto out;
9367         }
9368
9369         mutex_lock(&root->fs_info->chunk_mutex);
9370         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
9371                 u64 dev_offset;
9372
9373                 /*
9374                  * check to make sure we can actually find a chunk with enough
9375                  * space to fit our block group in.
9376                  */
9377                 if (device->total_bytes > device->bytes_used + min_free &&
9378                     !device->is_tgtdev_for_dev_replace) {
9379                         ret = find_free_dev_extent(trans, device, min_free,
9380                                                    &dev_offset, NULL);
9381                         if (!ret)
9382                                 dev_nr++;
9383
9384                         if (dev_nr >= dev_min)
9385                                 break;
9386
9387                         ret = -1;
9388                 }
9389         }
9390         mutex_unlock(&root->fs_info->chunk_mutex);
9391         btrfs_end_transaction(trans, root);
9392 out:
9393         btrfs_put_block_group(block_group);
9394         return ret;
9395 }
9396
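/*
 * Find the first BLOCK_GROUP_ITEM with an objectid at or past
 * key->objectid.  Returns 0 with the path pointing at the item,
 * a positive value if there is no such item, or a negative errno.
 */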
9397 static int find_first_block_group(struct btrfs_root *root,
9398                 struct btrfs_path *path, struct btrfs_key *key)
9399 {
9400         int ret = 0;
9401         struct btrfs_key found_key;
9402         struct extent_buffer *leaf;
9403         int slot;
9404
9405         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
9406         if (ret < 0)
9407                 goto out;
9408
9409         while (1) {
9410                 slot = path->slots[0];
9411                 leaf = path->nodes[0];
9412                 if (slot >= btrfs_header_nritems(leaf)) {
9413                         ret = btrfs_next_leaf(root, path);
9414                         if (ret == 0)
9415                                 continue;
9416                         if (ret < 0)
9417                                 goto out;
9418                         break;
9419                 }
9420                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
9421
9422                 if (found_key.objectid >= key->objectid &&
9423                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
9424                         ret = 0;
9425                         goto out;
9426                 }
9427                 path->slots[0]++;
9428         }
9429 out:
9430         return ret;
9431 }
9432
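/*
 * Drop the extra reference (iref) that block groups hold on their free
 * space cache inodes.  Called during unmount; restarts the scan from
 * the beginning until no block group still holds an inode reference.
 */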
9433 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
9434 {
9435         struct btrfs_block_group_cache *block_group;
9436         u64 last = 0;
9437
9438         while (1) {
9439                 struct inode *inode;
9440
9441                 block_group = btrfs_lookup_first_block_group(info, last);
9442                 while (block_group) {
9443                         spin_lock(&block_group->lock);
9444                         if (block_group->iref)
9445                                 break;
9446                         spin_unlock(&block_group->lock);
9447                         block_group = next_block_group(info->tree_root,
9448                                                        block_group);
9449                 }
9450                 if (!block_group) {
9451                         if (last == 0)
9452                                 break;
9453                         last = 0;
9454                         continue;
9455                 }
9456
9457                 inode = block_group->inode;
9458                 block_group->iref = 0;
9459                 block_group->inode = NULL;
9460                 spin_unlock(&block_group->lock);
9461                 iput(inode);
9462                 last = block_group->key.objectid + block_group->key.offset;
9463                 btrfs_put_block_group(block_group);
9464         }
9465 }
9466
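/*
 * Tear down all in-memory block group state at unmount time: pending
 * caching controls, the unused_bgs list, every cached block group and
 * finally the space_info structures and their sysfs objects.
 */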
9467 int btrfs_free_block_groups(struct btrfs_fs_info *info)
9468 {
9469         struct btrfs_block_group_cache *block_group;
9470         struct btrfs_space_info *space_info;
9471         struct btrfs_caching_control *caching_ctl;
9472         struct rb_node *n;
9473
9474         down_write(&info->commit_root_sem);
9475         while (!list_empty(&info->caching_block_groups)) {
9476                 caching_ctl = list_entry(info->caching_block_groups.next,
9477                                          struct btrfs_caching_control, list);
9478                 list_del(&caching_ctl->list);
9479                 put_caching_control(caching_ctl);
9480         }
9481         up_write(&info->commit_root_sem);
9482
9483         spin_lock(&info->unused_bgs_lock);
9484         while (!list_empty(&info->unused_bgs)) {
9485                 block_group = list_first_entry(&info->unused_bgs,
9486                                                struct btrfs_block_group_cache,
9487                                                bg_list);
9488                 list_del_init(&block_group->bg_list);
9489                 btrfs_put_block_group(block_group);
9490         }
9491         spin_unlock(&info->unused_bgs_lock);
9492
9493         spin_lock(&info->block_group_cache_lock);
9494         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
9495                 block_group = rb_entry(n, struct btrfs_block_group_cache,
9496                                        cache_node);
9497                 rb_erase(&block_group->cache_node,
9498                          &info->block_group_cache_tree);
9499                 RB_CLEAR_NODE(&block_group->cache_node);
9500                 spin_unlock(&info->block_group_cache_lock);
9501
9502                 down_write(&block_group->space_info->groups_sem);
9503                 list_del(&block_group->list);
9504                 up_write(&block_group->space_info->groups_sem);
9505
9506                 if (block_group->cached == BTRFS_CACHE_STARTED)
9507                         wait_block_group_cache_done(block_group);
9508
9509                 /*
9510                  * We haven't cached this block group, which means we could
9511                  * possibly have excluded extents on this block group.
9512                  */
9513                 if (block_group->cached == BTRFS_CACHE_NO ||
9514                     block_group->cached == BTRFS_CACHE_ERROR)
9515                         free_excluded_extents(info->extent_root, block_group);
9516
9517                 btrfs_remove_free_space_cache(block_group);
9518                 btrfs_put_block_group(block_group);
9519
9520                 spin_lock(&info->block_group_cache_lock);
9521         }
9522         spin_unlock(&info->block_group_cache_lock);
9523
9524         /* now that all the block groups are freed, go through and
9525          * free all the space_info structs.  This is only called during
9526          * the final stages of unmount, and so we know nobody is
9527          * using them.  We call synchronize_rcu() once before we start,
9528          * just to be on the safe side.
9529          */
9530         synchronize_rcu();
9531
9532         release_global_block_rsv(info);
9533
9534         while (!list_empty(&info->space_info)) {
9535                 int i;
9536
9537                 space_info = list_entry(info->space_info.next,
9538                                         struct btrfs_space_info,
9539                                         list);
9540                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
9541                         if (WARN_ON(space_info->bytes_pinned > 0 ||
9542                             space_info->bytes_reserved > 0 ||
9543                             space_info->bytes_may_use > 0)) {
9544                                 dump_space_info(space_info, 0, 0);
9545                         }
9546                 }
9547                 list_del(&space_info->list);
9548                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
9549                         struct kobject *kobj;
9550                         kobj = space_info->block_group_kobjs[i];
9551                         space_info->block_group_kobjs[i] = NULL;
9552                         if (kobj) {
9553                                 kobject_del(kobj);
9554                                 kobject_put(kobj);
9555                         }
9556                 }
9557                 kobject_del(&space_info->kobj);
9558                 kobject_put(&space_info->kobj);
9559         }
9560         return 0;
9561 }
9562
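/*
 * Add @cache to its space_info's list for the group's raid index.  The
 * first block group of a given raid type also gets a sysfs kobject
 * added under the space_info's kobject.
 */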
9563 static void __link_block_group(struct btrfs_space_info *space_info,
9564                                struct btrfs_block_group_cache *cache)
9565 {
9566         int index = get_block_group_index(cache);
9567         bool first = false;
9568
9569         down_write(&space_info->groups_sem);
9570         if (list_empty(&space_info->block_groups[index]))
9571                 first = true;
9572         list_add_tail(&cache->list, &space_info->block_groups[index]);
9573         up_write(&space_info->groups_sem);
9574
9575         if (first) {
9576                 struct raid_kobject *rkobj;
9577                 int ret;
9578
9579                 rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
9580                 if (!rkobj)
9581                         goto out_err;
9582                 rkobj->raid_type = index;
9583                 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
9584                 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
9585                                   "%s", get_raid_name(index));
9586                 if (ret) {
9587                         kobject_put(&rkobj->kobj);
9588                         goto out_err;
9589                 }
9590                 space_info->block_group_kobjs[index] = &rkobj->kobj;
9591         }
9592
9593         return;
9594 out_err:
9595         pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
9596 }
9597
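/*
 * Allocate and initialize an in-memory block group cache for the chunk
 * starting at @start.  Returns NULL on allocation failure; the caller
 * owns the initial reference.
 */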
9598 static struct btrfs_block_group_cache *
9599 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
9600 {
9601         struct btrfs_block_group_cache *cache;
9602
9603         cache = kzalloc(sizeof(*cache), GFP_NOFS);
9604         if (!cache)
9605                 return NULL;
9606
9607         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
9608                                         GFP_NOFS);
9609         if (!cache->free_space_ctl) {
9610                 kfree(cache);
9611                 return NULL;
9612         }
9613
9614         cache->key.objectid = start;
9615         cache->key.offset = size;
9616         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9617
9618         cache->sectorsize = root->sectorsize;
9619         cache->fs_info = root->fs_info;
9620         cache->full_stripe_len = btrfs_full_stripe_len(root,
9621                                                &root->fs_info->mapping_tree,
9622                                                start);
9623         atomic_set(&cache->count, 1);
9624         spin_lock_init(&cache->lock);
9625         init_rwsem(&cache->data_rwsem);
9626         INIT_LIST_HEAD(&cache->list);
9627         INIT_LIST_HEAD(&cache->cluster_list);
9628         INIT_LIST_HEAD(&cache->bg_list);
9629         INIT_LIST_HEAD(&cache->ro_list);
9630         INIT_LIST_HEAD(&cache->dirty_list);
9631         INIT_LIST_HEAD(&cache->io_list);
9632         btrfs_init_free_space_ctl(cache);
9633         atomic_set(&cache->trimming, 0);
9634
9635         return cache;
9636 }
9637
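/*
 * Read all block group items from the extent tree at mount time,
 * create their in-memory caches, hook them up to the right space_info
 * and mark read-only or unused block groups accordingly.
 */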
9638 int btrfs_read_block_groups(struct btrfs_root *root)
9639 {
9640         struct btrfs_path *path;
9641         int ret;
9642         struct btrfs_block_group_cache *cache;
9643         struct btrfs_fs_info *info = root->fs_info;
9644         struct btrfs_space_info *space_info;
9645         struct btrfs_key key;
9646         struct btrfs_key found_key;
9647         struct extent_buffer *leaf;
9648         int need_clear = 0;
9649         u64 cache_gen;
9650
9651         root = info->extent_root;
9652         key.objectid = 0;
9653         key.offset = 0;
9654         key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9655         path = btrfs_alloc_path();
9656         if (!path)
9657                 return -ENOMEM;
9658         path->reada = 1;
9659
9660         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
9661         if (btrfs_test_opt(root, SPACE_CACHE) &&
9662             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
9663                 need_clear = 1;
9664         if (btrfs_test_opt(root, CLEAR_CACHE))
9665                 need_clear = 1;
9666
9667         while (1) {
9668                 ret = find_first_block_group(root, path, &key);
9669                 if (ret > 0)
9670                         break;
9671                 if (ret != 0)
9672                         goto error;
9673
9674                 leaf = path->nodes[0];
9675                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
9676
9677                 cache = btrfs_create_block_group_cache(root, found_key.objectid,
9678                                                        found_key.offset);
9679                 if (!cache) {
9680                         ret = -ENOMEM;
9681                         goto error;
9682                 }
9683
9684                 if (need_clear) {
9685                         /*
9686                          * When we mount with old space cache, we need to
9687                          * set BTRFS_DC_CLEAR and set the dirty flag.
9688                          *
9689                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
9690                          *    truncate the old free space cache inode and
9691                          *    set up a new one.
9692                          * b) Setting 'dirty flag' makes sure that we flush
9693                          *    the new space cache info onto disk.
9694                          */
9695                         if (btrfs_test_opt(root, SPACE_CACHE))
9696                                 cache->disk_cache_state = BTRFS_DC_CLEAR;
9697                 }
9698
9699                 read_extent_buffer(leaf, &cache->item,
9700                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
9701                                    sizeof(cache->item));
9702                 cache->flags = btrfs_block_group_flags(&cache->item);
9703
9704                 key.objectid = found_key.objectid + found_key.offset;
9705                 btrfs_release_path(path);
9706
9707                 /*
9708                  * We need to exclude the super stripes now so that the space
9709                  * info has super bytes accounted for, otherwise we'll think
9710                  * we have more space than we actually do.
9711                  */
9712                 ret = exclude_super_stripes(root, cache);
9713                 if (ret) {
9714                         /*
9715                          * We may have excluded something, so call this just in
9716                          * case.
9717                          */
9718                         free_excluded_extents(root, cache);
9719                         btrfs_put_block_group(cache);
9720                         goto error;
9721                 }
9722
9723                 /*
9724                  * check for two cases: either we are full, and therefore
9725                  * don't need to bother with the caching work since we won't
9726                  * find any space, or we are empty, and we can just add all
9727                  * the space in and be done with it.  This saves us a lot of
9728                  * time, particularly in the full case.
9729                  */
9730                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
9731                         cache->last_byte_to_unpin = (u64)-1;
9732                         cache->cached = BTRFS_CACHE_FINISHED;
9733                         free_excluded_extents(root, cache);
9734                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9735                         cache->last_byte_to_unpin = (u64)-1;
9736                         cache->cached = BTRFS_CACHE_FINISHED;
9737                         add_new_free_space(cache, root->fs_info,
9738                                            found_key.objectid,
9739                                            found_key.objectid +
9740                                            found_key.offset);
9741                         free_excluded_extents(root, cache);
9742                 }
9743
9744                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
9745                 if (ret) {
9746                         btrfs_remove_free_space_cache(cache);
9747                         btrfs_put_block_group(cache);
9748                         goto error;
9749                 }
9750
9751                 ret = update_space_info(info, cache->flags, found_key.offset,
9752                                         btrfs_block_group_used(&cache->item),
9753                                         &space_info);
9754                 if (ret) {
9755                         btrfs_remove_free_space_cache(cache);
9756                         spin_lock(&info->block_group_cache_lock);
9757                         rb_erase(&cache->cache_node,
9758                                  &info->block_group_cache_tree);
9759                         RB_CLEAR_NODE(&cache->cache_node);
9760                         spin_unlock(&info->block_group_cache_lock);
9761                         btrfs_put_block_group(cache);
9762                         goto error;
9763                 }
9764
9765                 cache->space_info = space_info;
9766                 spin_lock(&cache->space_info->lock);
9767                 cache->space_info->bytes_readonly += cache->bytes_super;
9768                 spin_unlock(&cache->space_info->lock);
9769
9770                 __link_block_group(space_info, cache);
9771
9772                 set_avail_alloc_bits(root->fs_info, cache->flags);
9773                 if (btrfs_chunk_readonly(root, cache->key.objectid)) {
9774                         inc_block_group_ro(cache, 1);
9775                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9776                         spin_lock(&info->unused_bgs_lock);
9777                         /* Should always be true but just in case. */
9778                         if (list_empty(&cache->bg_list)) {
9779                                 btrfs_get_block_group(cache);
9780                                 list_add_tail(&cache->bg_list,
9781                                               &info->unused_bgs);
9782                         }
9783                         spin_unlock(&info->unused_bgs_lock);
9784                 }
9785         }
9786
9787         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
9788                 if (!(get_alloc_profile(root, space_info->flags) &
9789                       (BTRFS_BLOCK_GROUP_RAID10 |
9790                        BTRFS_BLOCK_GROUP_RAID1 |
9791                        BTRFS_BLOCK_GROUP_RAID5 |
9792                        BTRFS_BLOCK_GROUP_RAID6 |
9793                        BTRFS_BLOCK_GROUP_DUP)))
9794                         continue;
9795                 /*
9796                  * avoid allocating from un-mirrored block groups if there are
9797                  * mirrored block groups.
9798                  */
9799                 list_for_each_entry(cache,
9800                                 &space_info->block_groups[BTRFS_RAID_RAID0],
9801                                 list)
9802                         inc_block_group_ro(cache, 1);
9803                 list_for_each_entry(cache,
9804                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
9805                                 list)
9806                         inc_block_group_ro(cache, 1);
9807         }
9808
9809         init_global_block_rsv(info);
9810         ret = 0;
9811 error:
9812         btrfs_free_path(path);
9813         return ret;
9814 }
9815
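/*
 * Insert block group items for the block groups created during this
 * transaction and finish their chunk allocation.  Each entry is
 * removed from trans->new_bgs whether or not insertion succeeded,
 * since a failure aborts the transaction anyway.
 */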
9816 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
9817                                        struct btrfs_root *root)
9818 {
9819         struct btrfs_block_group_cache *block_group, *tmp;
9820         struct btrfs_root *extent_root = root->fs_info->extent_root;
9821         struct btrfs_block_group_item item;
9822         struct btrfs_key key;
9823         int ret = 0;
9824         bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
9825
9826         trans->can_flush_pending_bgs = false;
9827         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
9828                 if (ret)
9829                         goto next;
9830
9831                 spin_lock(&block_group->lock);
9832                 memcpy(&item, &block_group->item, sizeof(item));
9833                 memcpy(&key, &block_group->key, sizeof(key));
9834                 spin_unlock(&block_group->lock);
9835
9836                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
9837                                         sizeof(item));
9838                 if (ret)
9839                         btrfs_abort_transaction(trans, extent_root, ret);
9840                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
9841                                                key.objectid, key.offset);
9842                 if (ret)
9843                         btrfs_abort_transaction(trans, extent_root, ret);
9844 next:
9845                 list_del_init(&block_group->bg_list);
9846         }
9847         trans->can_flush_pending_bgs = can_flush_pending_bgs;
9848 }
9849
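/*
 * Create a new block group for the chunk at @chunk_offset and link it
 * into the in-memory caches and its space_info.  The on-disk block
 * group item is inserted later by btrfs_create_pending_block_groups().
 */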
9850 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
9851                            struct btrfs_root *root, u64 bytes_used,
9852                            u64 type, u64 chunk_objectid, u64 chunk_offset,
9853                            u64 size)
9854 {
9855         int ret;
9856         struct btrfs_root *extent_root;
9857         struct btrfs_block_group_cache *cache;
9858
9859         extent_root = root->fs_info->extent_root;
9860
9861         btrfs_set_log_full_commit(root->fs_info, trans);
9862
9863         cache = btrfs_create_block_group_cache(root, chunk_offset, size);
9864         if (!cache)
9865                 return -ENOMEM;
9866
9867         btrfs_set_block_group_used(&cache->item, bytes_used);
9868         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
9869         btrfs_set_block_group_flags(&cache->item, type);
9870
9871         cache->flags = type;
9872         cache->last_byte_to_unpin = (u64)-1;
9873         cache->cached = BTRFS_CACHE_FINISHED;
9874         ret = exclude_super_stripes(root, cache);
9875         if (ret) {
9876                 /*
9877                  * We may have excluded something, so call this just in
9878                  * case.
9879                  */
9880                 free_excluded_extents(root, cache);
9881                 btrfs_put_block_group(cache);
9882                 return ret;
9883         }
9884
9885         add_new_free_space(cache, root->fs_info, chunk_offset,
9886                            chunk_offset + size);
9887
9888         free_excluded_extents(root, cache);
9889
9890 #ifdef CONFIG_BTRFS_DEBUG
9891         if (btrfs_should_fragment_free_space(root, cache)) {
9892                 u64 new_bytes_used = size - bytes_used;
9893
9894                 bytes_used += new_bytes_used >> 1;
9895                 fragment_free_space(root, cache);
9896         }
9897 #endif
9898         /*
9899          * Call to ensure the corresponding space_info object is created and
9900          * assigned to our block group, but don't update its counters just yet.
9901          * We want our bg to be added to the rbtree with its ->space_info set.
9902          */
9903         ret = update_space_info(root->fs_info, cache->flags, 0, 0,
9904                                 &cache->space_info);
9905         if (ret) {
9906                 btrfs_remove_free_space_cache(cache);
9907                 btrfs_put_block_group(cache);
9908                 return ret;
9909         }
9910
9911         ret = btrfs_add_block_group_cache(root->fs_info, cache);
9912         if (ret) {
9913                 btrfs_remove_free_space_cache(cache);
9914                 btrfs_put_block_group(cache);
9915                 return ret;
9916         }
9917
9918         /*
9919          * Now that our block group has its ->space_info set and is inserted in
9920          * the rbtree, update the space info's counters.
9921          */
9922         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
9923                                 &cache->space_info);
9924         if (ret) {
9925                 btrfs_remove_free_space_cache(cache);
9926                 spin_lock(&root->fs_info->block_group_cache_lock);
9927                 rb_erase(&cache->cache_node,
9928                          &root->fs_info->block_group_cache_tree);
9929                 RB_CLEAR_NODE(&cache->cache_node);
9930                 spin_unlock(&root->fs_info->block_group_cache_lock);
9931                 btrfs_put_block_group(cache);
9932                 return ret;
9933         }
9934         update_global_block_rsv(root->fs_info);
9935
9936         spin_lock(&cache->space_info->lock);
9937         cache->space_info->bytes_readonly += cache->bytes_super;
9938         spin_unlock(&cache->space_info->lock);
9939
9940         __link_block_group(cache->space_info, cache);
9941
9942         list_add_tail(&cache->bg_list, &trans->new_bgs);
9943
9944         set_avail_alloc_bits(extent_root->fs_info, type);
9945
9946         return 0;
9947 }
9948
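/*
 * Clear the availability bits for the profile in @flags; the
 * counterpart of set_avail_alloc_bits(), called once the last block
 * group with a given profile has been removed.
 */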
9949 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
9950 {
9951         u64 extra_flags = chunk_to_extended(flags) &
9952                                 BTRFS_EXTENDED_PROFILE_MASK;
9953
9954         write_seqlock(&fs_info->profiles_lock);
9955         if (flags & BTRFS_BLOCK_GROUP_DATA)
9956                 fs_info->avail_data_alloc_bits &= ~extra_flags;
9957         if (flags & BTRFS_BLOCK_GROUP_METADATA)
9958                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
9959         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
9960                 fs_info->avail_system_alloc_bits &= ~extra_flags;
9961         write_sequnlock(&fs_info->profiles_lock);
9962 }
9963
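/*
 * Remove the block group for the chunk at @group_start: release its
 * free space cache inode, unlink the group from all in-memory lists
 * and trees and delete its item from the extent tree.  The block
 * group must already be read-only.
 */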
9964 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9965                              struct btrfs_root *root, u64 group_start,
9966                              struct extent_map *em)
9967 {
9968         struct btrfs_path *path;
9969         struct btrfs_block_group_cache *block_group;
9970         struct btrfs_free_cluster *cluster;
9971         struct btrfs_root *tree_root = root->fs_info->tree_root;
9972         struct btrfs_key key;
9973         struct inode *inode;
9974         struct kobject *kobj = NULL;
9975         int ret;
9976         int index;
9977         int factor;
9978         struct btrfs_caching_control *caching_ctl = NULL;
9979         bool remove_em;
9980
9981         root = root->fs_info->extent_root;
9982
9983         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
9984         BUG_ON(!block_group);
9985         BUG_ON(!block_group->ro);
9986
9987         /*
9988          * Free the reserved super bytes from this block group before
9989          * removing it.
9990          */
9991         free_excluded_extents(root, block_group);
9992
9993         memcpy(&key, &block_group->key, sizeof(key));
9994         index = get_block_group_index(block_group);
9995         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
9996                                   BTRFS_BLOCK_GROUP_RAID1 |
9997                                   BTRFS_BLOCK_GROUP_RAID10))
9998                 factor = 2;
9999         else
10000                 factor = 1;
10001
10002         /* make sure this block group isn't part of an allocation cluster */
10003         cluster = &root->fs_info->data_alloc_cluster;
10004         spin_lock(&cluster->refill_lock);
10005         btrfs_return_cluster_to_free_space(block_group, cluster);
10006         spin_unlock(&cluster->refill_lock);
10007
10008         /*
10009          * make sure this block group isn't part of a metadata
10010          * allocation cluster
10011          */
10012         cluster = &root->fs_info->meta_alloc_cluster;
10013         spin_lock(&cluster->refill_lock);
10014         btrfs_return_cluster_to_free_space(block_group, cluster);
10015         spin_unlock(&cluster->refill_lock);
10016
10017         path = btrfs_alloc_path();
10018         if (!path) {
10019                 ret = -ENOMEM;
10020                 goto out;
10021         }
10022
10023         /*
10024          * get the inode first so any iput calls done for the io_list
10025          * aren't the final iput (no unlinks allowed now)
10026          */
10027         inode = lookup_free_space_inode(tree_root, block_group, path);
10028
10029         mutex_lock(&trans->transaction->cache_write_mutex);
10030         /*
10031          * make sure our free spache cache IO is done before remove the
10032          * make sure our free space cache IO is done before we remove the
10033          */
10034         spin_lock(&trans->transaction->dirty_bgs_lock);
10035         if (!list_empty(&block_group->io_list)) {
10036                 list_del_init(&block_group->io_list);
10037
10038                 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
10039
10040                 spin_unlock(&trans->transaction->dirty_bgs_lock);
10041                 btrfs_wait_cache_io(root, trans, block_group,
10042                                     &block_group->io_ctl, path,
10043                                     block_group->key.objectid);
10044                 btrfs_put_block_group(block_group);
10045                 spin_lock(&trans->transaction->dirty_bgs_lock);
10046         }
10047
10048         if (!list_empty(&block_group->dirty_list)) {
10049                 list_del_init(&block_group->dirty_list);
10050                 btrfs_put_block_group(block_group);
10051         }
10052         spin_unlock(&trans->transaction->dirty_bgs_lock);
10053         mutex_unlock(&trans->transaction->cache_write_mutex);
10054
10055         if (!IS_ERR(inode)) {
10056                 ret = btrfs_orphan_add(trans, inode);
10057                 if (ret) {
10058                         btrfs_add_delayed_iput(inode);
10059                         goto out;
10060                 }
10061                 clear_nlink(inode);
10062                 /* One for the block group's ref */
10063                 spin_lock(&block_group->lock);
10064                 if (block_group->iref) {
10065                         block_group->iref = 0;
10066                         block_group->inode = NULL;
10067                         spin_unlock(&block_group->lock);
10068                         iput(inode);
10069                 } else {
10070                         spin_unlock(&block_group->lock);
10071                 }
10072                 /* One for our lookup ref */
10073                 btrfs_add_delayed_iput(inode);
10074         }
10075
10076         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
10077         key.offset = block_group->key.objectid;
10078         key.type = 0;
10079
10080         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
10081         if (ret < 0)
10082                 goto out;
10083         if (ret > 0)
10084                 btrfs_release_path(path);
10085         if (ret == 0) {
10086                 ret = btrfs_del_item(trans, tree_root, path);
10087                 if (ret)
10088                         goto out;
10089                 btrfs_release_path(path);
10090         }
10091
10092         spin_lock(&root->fs_info->block_group_cache_lock);
10093         rb_erase(&block_group->cache_node,
10094                  &root->fs_info->block_group_cache_tree);
10095         RB_CLEAR_NODE(&block_group->cache_node);
10096
10097         if (root->fs_info->first_logical_byte == block_group->key.objectid)
10098                 root->fs_info->first_logical_byte = (u64)-1;
10099         spin_unlock(&root->fs_info->block_group_cache_lock);
10100
10101         down_write(&block_group->space_info->groups_sem);
10102         /*
10103          * we must use list_del_init so people can check to see if they
10104          * are still on the list after taking the semaphore
10105          */
10106         list_del_init(&block_group->list);
10107         if (list_empty(&block_group->space_info->block_groups[index])) {
10108                 kobj = block_group->space_info->block_group_kobjs[index];
10109                 block_group->space_info->block_group_kobjs[index] = NULL;
10110                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
10111         }
10112         up_write(&block_group->space_info->groups_sem);
10113         if (kobj) {
10114                 kobject_del(kobj);
10115                 kobject_put(kobj);
10116         }
10117
10118         if (block_group->has_caching_ctl)
10119                 caching_ctl = get_caching_control(block_group);
10120         if (block_group->cached == BTRFS_CACHE_STARTED)
10121                 wait_block_group_cache_done(block_group);
10122         if (block_group->has_caching_ctl) {
10123                 down_write(&root->fs_info->commit_root_sem);
10124                 if (!caching_ctl) {
10125                         struct btrfs_caching_control *ctl;
10126
10127                         list_for_each_entry(ctl,
10128                                     &root->fs_info->caching_block_groups, list)
10129                                 if (ctl->block_group == block_group) {
10130                                         caching_ctl = ctl;
10131                                         atomic_inc(&caching_ctl->count);
10132                                         break;
10133                                 }
10134                 }
10135                 if (caching_ctl)
10136                         list_del_init(&caching_ctl->list);
10137                 up_write(&root->fs_info->commit_root_sem);
10138                 if (caching_ctl) {
10139                         /* Once for the caching bgs list and once for us. */
10140                         put_caching_control(caching_ctl);
10141                         put_caching_control(caching_ctl);
10142                 }
10143         }
10144
10145         spin_lock(&trans->transaction->dirty_bgs_lock);
10146         WARN_ON(!list_empty(&block_group->dirty_list));
10147         WARN_ON(!list_empty(&block_group->io_list));
10152         spin_unlock(&trans->transaction->dirty_bgs_lock);
10153         btrfs_remove_free_space_cache(block_group);
10154
10155         spin_lock(&block_group->space_info->lock);
10156         list_del_init(&block_group->ro_list);
10157
10158         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
10159                 WARN_ON(block_group->space_info->total_bytes
10160                         < block_group->key.offset);
10161                 WARN_ON(block_group->space_info->bytes_readonly
10162                         < block_group->key.offset);
10163                 WARN_ON(block_group->space_info->disk_total
10164                         < block_group->key.offset * factor);
10165         }
10166         block_group->space_info->total_bytes -= block_group->key.offset;
10167         block_group->space_info->bytes_readonly -= block_group->key.offset;
10168         block_group->space_info->disk_total -= block_group->key.offset * factor;
10169
10170         spin_unlock(&block_group->space_info->lock);
10171
10172         memcpy(&key, &block_group->key, sizeof(key));
10173
10174         lock_chunks(root);
10175         if (!list_empty(&em->list)) {
10176                 /* We're in the transaction->pending_chunks list. */
10177                 free_extent_map(em);
10178         }
10179         spin_lock(&block_group->lock);
10180         block_group->removed = 1;
10181         /*
10182          * At this point trimming can't start on this block group, because we
10183          * removed the block group from the tree fs_info->block_group_cache_tree
10184          * so no one can't find it anymore and even if someone already got this
10185          * so no one can find it anymore, and even if someone already got this
10186          * incremented block_group->trimming - if they didn't, they won't find
10187          * any free space entries because we already removed them all when we
10188          * called btrfs_remove_free_space_cache().
10189          *
10190          * And we must not remove the extent map from the fs_info->mapping_tree
10191          * to prevent the same logical address range and physical device space
10192          * ranges from being reused for a new block group. This is because our
10193          * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
10194          * completely transactionless, so while it is trimming a range the
10195          * currently running transaction might finish and a new one start,
10196          * allowing for new block groups to be created that can reuse the same
10197          * physical device locations unless we take this special care.
10198          *
10199          * There may also be an implicit trim operation if the file system
10200          * is mounted with -odiscard. The same protections must remain
10201          * in place until the extents have been discarded completely when
10202          * the transaction commit has completed.
10203          */
10204         remove_em = (atomic_read(&block_group->trimming) == 0);
10205         /*
10206          * Make sure a trimmer task always sees the em in the pinned_chunks list
10207          * if it sees block_group->removed == 1 (needs to lock block_group->lock
10208          * before checking block_group->removed).
10209          */
10210         if (!remove_em) {
10211                 /*
10212                  * Our em might be in trans->transaction->pending_chunks which
10213                  * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
10214                  * and so is the fs_info->pinned_chunks list.
10215                  *
10216                  * So at this point we must be holding the chunk_mutex to avoid
10217                  * any races with chunk allocation (more specifically at
10218                  * volumes.c:contains_pending_extent()), to ensure it always
10219                  * sees the em, either in the pending_chunks list or in the
10220                  * pinned_chunks list.
10221                  */
10222                 list_move_tail(&em->list, &root->fs_info->pinned_chunks);
10223         }
10224         spin_unlock(&block_group->lock);
10225
10226         if (remove_em) {
10227                 struct extent_map_tree *em_tree;
10228
10229                 em_tree = &root->fs_info->mapping_tree.map_tree;
10230                 write_lock(&em_tree->lock);
10231                 /*
10232                  * The em might be in the pending_chunks list, so make sure the
10233                  * chunk mutex is locked, since remove_extent_mapping() will
10234                  * delete us from that list.
10235                  */
10236                 remove_extent_mapping(em_tree, em);
10237                 write_unlock(&em_tree->lock);
10238                 /* once for the tree */
10239                 free_extent_map(em);
10240         }
10241
10242         unlock_chunks(root);
10243
10244         btrfs_put_block_group(block_group);
10245         btrfs_put_block_group(block_group);
10246
10247         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
10248         if (ret > 0)
10249                 ret = -EIO;
10250         if (ret < 0)
10251                 goto out;
10252
10253         ret = btrfs_del_item(trans, root, path);
10254 out:
10255         btrfs_free_path(path);
10256         return ret;
10257 }
10258
10259 /*
10260  * Process the unused_bgs list and remove any that don't have any allocated
10261  * space inside of them.
10262  */
10263 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
10264 {
10265         struct btrfs_block_group_cache *block_group;
10266         struct btrfs_space_info *space_info;
10267         struct btrfs_root *root = fs_info->extent_root;
10268         struct btrfs_trans_handle *trans;
10269         int ret = 0;
10270
10271         if (!fs_info->open)
10272                 return;
10273
10274         spin_lock(&fs_info->unused_bgs_lock);
10275         while (!list_empty(&fs_info->unused_bgs)) {
10276                 u64 start, end;
10277                 int trimming;
10278
10279                 block_group = list_first_entry(&fs_info->unused_bgs,
10280                                                struct btrfs_block_group_cache,
10281                                                bg_list);
10282                 list_del_init(&block_group->bg_list);
10283
10284                 space_info = block_group->space_info;
10285
10286                 if (ret || btrfs_mixed_space_info(space_info)) {
10287                         btrfs_put_block_group(block_group);
10288                         continue;
10289                 }
10290                 spin_unlock(&fs_info->unused_bgs_lock);
10291
10292                 mutex_lock(&fs_info->delete_unused_bgs_mutex);
10293
10294                 /* Don't want to race with allocators so take the groups_sem */
10295                 down_write(&space_info->groups_sem);
10296                 spin_lock(&block_group->lock);
10297                 if (block_group->reserved ||
10298                     btrfs_block_group_used(&block_group->item) ||
10299                     block_group->ro ||
10300                     list_is_singular(&block_group->list)) {
10301                         /*
10302                          * We want to bail if we made new allocations or have
10303                          * outstanding allocations in this block group.  We do
10304                          * the ro check in case balance is currently acting on
10305                          * this block group.
10306                          */
10307                         spin_unlock(&block_group->lock);
10308                         up_write(&space_info->groups_sem);
10309                         goto next;
10310                 }
10311                 spin_unlock(&block_group->lock);
10312
10313                 /* We don't want to force the issue, only flip if it's ok. */
10314                 ret = inc_block_group_ro(block_group, 0);
10315                 up_write(&space_info->groups_sem);
10316                 if (ret < 0) {
10317                         ret = 0;
10318                         goto next;
10319                 }
10320
10321                 /*
10322                  * Want to do this before we do anything else so we can recover
10323                  * properly if we fail to join the transaction.
10324                  */
10325                 /* 1 for btrfs_orphan_reserve_metadata() */
10326                 trans = btrfs_start_transaction(root, 1);
10327                 if (IS_ERR(trans)) {
10328                         btrfs_dec_block_group_ro(root, block_group);
10329                         ret = PTR_ERR(trans);
10330                         goto next;
10331                 }
10332
10333                 /*
10334                  * We could have pending pinned extents for this block group,
10335                  * just delete them, we don't care about them anymore.
10336                  */
10337                 start = block_group->key.objectid;
10338                 end = start + block_group->key.offset - 1;
10339                 /*
10340                  * Hold the unused_bg_unpin_mutex lock to avoid racing with
10341                  * btrfs_finish_extent_commit(). If we are at transaction N,
10342                  * another task might be running finish_extent_commit() for the
10343                  * previous transaction N - 1, and have seen a range belonging
10344                  * to the block group in freed_extents[] before we were able to
10345                  * clear the whole block group range from freed_extents[]. This
10346                  * means that task can lookup for the block group after we
10347                  * unpinned it from freed_extents[] and removed it, leading to
10348                  * a BUG_ON() at btrfs_unpin_extent_range().
10349                  */
10350                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
10351                 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
10352                                   EXTENT_DIRTY, GFP_NOFS);
10353                 if (ret) {
10354                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10355                         btrfs_dec_block_group_ro(root, block_group);
10356                         goto end_trans;
10357                 }
10358                 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
10359                                   EXTENT_DIRTY, GFP_NOFS);
10360                 if (ret) {
10361                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10362                         btrfs_dec_block_group_ro(root, block_group);
10363                         goto end_trans;
10364                 }
10365                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10366
10367                 /* Reset pinned so btrfs_put_block_group doesn't complain */
10368                 spin_lock(&space_info->lock);
10369                 spin_lock(&block_group->lock);
10370
10371                 space_info->bytes_pinned -= block_group->pinned;
10372                 space_info->bytes_readonly += block_group->pinned;
10373                 percpu_counter_add(&space_info->total_bytes_pinned,
10374                                    -block_group->pinned);
10375                 block_group->pinned = 0;
10376
10377                 spin_unlock(&block_group->lock);
10378                 spin_unlock(&space_info->lock);
10379
10380                 /* DISCARD can flip during remount */
10381                 trimming = btrfs_test_opt(root, DISCARD);
10382
10383                 /* Implicit trim during transaction commit. */
10384                 if (trimming)
10385                         btrfs_get_block_group_trimming(block_group);
10386
10387                 /*
10388                  * btrfs_remove_chunk() will abort the transaction if things go
10389                  * horribly wrong.
10390                  */
10391                 ret = btrfs_remove_chunk(trans, root,
10392                                          block_group->key.objectid);
10393
10394                 if (ret) {
10395                         if (trimming)
10396                                 btrfs_put_block_group_trimming(block_group);
10397                         goto end_trans;
10398                 }
10399
10400                 /*
10401                  * If we're not mounted with -odiscard, we can just forget
10402                  * about this block group. Otherwise we'll need to wait
10403                  * until transaction commit to do the actual discard.
10404                  */
10405                 if (trimming) {
10406                         WARN_ON(!list_empty(&block_group->bg_list));
10407                         spin_lock(&trans->transaction->deleted_bgs_lock);
10408                         list_move(&block_group->bg_list,
10409                                   &trans->transaction->deleted_bgs);
10410                         spin_unlock(&trans->transaction->deleted_bgs_lock);
10411                         btrfs_get_block_group(block_group);
10412                 }
10413 end_trans:
10414                 btrfs_end_transaction(trans, root);
10415 next:
10416                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
10417                 btrfs_put_block_group(block_group);
10418                 spin_lock(&fs_info->unused_bgs_lock);
10419         }
10420         spin_unlock(&fs_info->unused_bgs_lock);
10421 }
10422
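/*
 * Create the initial space_info structures for a new filesystem:
 * system chunks plus either a single mixed group or separate metadata
 * and data groups, depending on the MIXED_GROUPS incompat flag.
 */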
10423 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
10424 {
10425         struct btrfs_space_info *space_info;
10426         struct btrfs_super_block *disk_super;
10427         u64 features;
10428         u64 flags;
10429         int mixed = 0;
10430         int ret;
10431
10432         disk_super = fs_info->super_copy;
10433         if (!btrfs_super_root(disk_super))
10434                 return 1;
10435
10436         features = btrfs_super_incompat_flags(disk_super);
10437         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
10438                 mixed = 1;
10439
10440         flags = BTRFS_BLOCK_GROUP_SYSTEM;
10441         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10442         if (ret)
10443                 goto out;
10444
10445         if (mixed) {
10446                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
10447                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10448         } else {
10449                 flags = BTRFS_BLOCK_GROUP_METADATA;
10450                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10451                 if (ret)
10452                         goto out;
10453
10454                 flags = BTRFS_BLOCK_GROUP_DATA;
10455                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10456         }
10457 out:
10458         return ret;
10459 }
10460
10461 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
10462 {
10463         return unpin_extent_range(root, start, end, false);
10464 }
10465
10466 /*
10467  * It used to be that old block groups would be left around forever.
10468  * Iterating over them would be enough to trim unused space.  Since we
10469  * now automatically remove them, we also need to iterate over unallocated
10470  * space.
10471  *
10472  * We don't want a transaction for this since the discard may take a
10473  * substantial amount of time.  We don't require that a transaction be
10474  * running, but we do need to take a running transaction into account
10475  * to ensure that we're not discarding chunks that were released in
10476  * the current transaction.
10477  *
10478  * Holding the chunks lock will prevent other threads from allocating
10479  * or releasing chunks, but it won't prevent a running transaction
10480  * from committing and releasing the memory that the pending chunks
10481  * list head uses.  For that, we need to take a reference to the
10482  * transaction.
10483  */
10484 static int btrfs_trim_free_extents(struct btrfs_device *device,
10485                                    u64 minlen, u64 *trimmed)
10486 {
10487         u64 start = 0, len = 0;
10488         int ret;
10489
10490         *trimmed = 0;
10491
10492         /* Not writeable = nothing to do. */
10493         if (!device->writeable)
10494                 return 0;
10495
10496         /* No free space = nothing to do. */
10497         if (device->total_bytes <= device->bytes_used)
10498                 return 0;
10499
10500         ret = 0;
10501
10502         while (1) {
10503                 struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
10504                 struct btrfs_transaction *trans;
10505                 u64 bytes;
10506
10507                 ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
10508                 if (ret)
10509                         return ret;
10510
10511                 down_read(&fs_info->commit_root_sem);
10512
10513                 spin_lock(&fs_info->trans_lock);
10514                 trans = fs_info->running_transaction;
10515                 if (trans)
10516                         atomic_inc(&trans->use_count);
10517                 spin_unlock(&fs_info->trans_lock);
10518
10519                 ret = find_free_dev_extent_start(trans, device, minlen, start,
10520                                                  &start, &len);
10521                 if (trans)
10522                         btrfs_put_transaction(trans);
10523
10524                 if (ret) {
10525                         up_read(&fs_info->commit_root_sem);
10526                         mutex_unlock(&fs_info->chunk_mutex);
10527                         if (ret == -ENOSPC)
10528                                 ret = 0;
10529                         break;
10530                 }
10531
                ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
                up_read(&fs_info->commit_root_sem);
                mutex_unlock(&fs_info->chunk_mutex);

                if (ret)
                        break;

                start += len;
                *trimmed += bytes;

                if (fatal_signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }

                cond_resched();
        }

        return ret;
}

int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_block_group_cache *cache = NULL;
        struct btrfs_device *device;
        struct list_head *devices;
        u64 group_trimmed;
        u64 start;
        u64 end;
        u64 trimmed = 0;
        u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
        int ret = 0;

        /*
         * Try to trim the whole filesystem; note that our first block
         * group may start at a non-zero offset.
         */
        if (range->len == total_bytes)
                cache = btrfs_lookup_first_block_group(fs_info, range->start);
        else
                cache = btrfs_lookup_block_group(fs_info, range->start);

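        /*
         * Walk every block group that overlaps [range->start,
         * range->start + range->len) and trim the free space inside it.
         */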
        while (cache) {
                if (cache->key.objectid >= (range->start + range->len)) {
                        btrfs_put_block_group(cache);
                        break;
                }

                start = max(range->start, cache->key.objectid);
                end = min(range->start + range->len,
                                cache->key.objectid + cache->key.offset);

                if (end - start >= range->minlen) {
                        if (!block_group_cache_done(cache)) {
                                ret = cache_block_group(cache, 0);
                                if (ret) {
                                        btrfs_put_block_group(cache);
                                        break;
                                }
                                ret = wait_block_group_cache_done(cache);
                                if (ret) {
                                        btrfs_put_block_group(cache);
                                        break;
                                }
                        }
                        ret = btrfs_trim_block_group(cache,
                                                     &group_trimmed,
                                                     start,
                                                     end,
                                                     range->minlen);

                        trimmed += group_trimmed;
                        if (ret) {
                                btrfs_put_block_group(cache);
                                break;
                        }
                }

                cache = next_block_group(fs_info->tree_root, cache);
        }

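        /*
         * Block groups only cover allocated chunks, so also walk each
         * device on the allocation list and trim its unallocated space.
         */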
        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
        devices = &root->fs_info->fs_devices->alloc_list;
        list_for_each_entry(device, devices, dev_alloc_list) {
                ret = btrfs_trim_free_extents(device, range->minlen,
                                              &group_trimmed);
                if (ret)
                        break;

                trimmed += group_trimmed;
        }
        mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

        range->len = trimmed;
        return ret;
}

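/*
 * btrfs_trim_fs() is the btrfs backend for the generic FITRIM ioctl.  As
 * a minimal userspace sketch of how this path gets exercised (for
 * illustration only, not part of this file; the mount point "/mnt/btrfs"
 * is a made-up example):
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
        struct fstrim_range range;
        int fd = open("/mnt/btrfs", O_RDONLY);

        if (fd < 0)
                return 1;

        memset(&range, 0, sizeof(range));
        range.len = (__u64)-1;          /* trim the entire filesystem */
        range.minlen = 0;               /* accept any extent size */

        /* On success the kernel writes the trimmed byte count back. */
        if (ioctl(fd, FITRIM, &range) == 0)
                printf("trimmed %llu bytes\n",
                       (unsigned long long)range.len);

        close(fd);
        return 0;
}
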
/*
 * btrfs_{start,end}_write_no_snapshoting() are similar to
 * mnt_{want,drop}_write().  They are used to prevent tasks from writing
 * data into the page cache via nocow before the subvolume is snapshotted
 * and then flushing that data to disk only after the snapshot has been
 * created, and to prevent operations during an ongoing snapshot that
 * would leave the snapshot inconsistent (for example, writes followed
 * by expanding truncates).
 */
void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
{
        percpu_counter_dec(&root->subv_writers->counter);
        /*
         * Make sure counter is updated before we wake up waiters.
         */
        smp_mb();
        if (waitqueue_active(&root->subv_writers->wait))
                wake_up(&root->subv_writers->wait);
}

int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
{
        if (atomic_read(&root->will_be_snapshoted))
                return 0;

        percpu_counter_inc(&root->subv_writers->counter);
        /*
         * Make sure counter is updated before we check for snapshot creation.
         */
        smp_mb();
        if (atomic_read(&root->will_be_snapshoted)) {
                btrfs_end_write_no_snapshoting(root);
                return 0;
        }
        return 1;
}
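
/*
 * A minimal sketch of the intended caller pattern (example_nocow_write()
 * and its body are hypothetical, shown only to illustrate the pairing):
 * a return of 0 from btrfs_start_write_no_snapshoting() means a snapshot
 * is pending or in flight and the caller must fall back to the cow path;
 * a return of 1 means the caller holds a writer reference that must be
 * dropped with btrfs_end_write_no_snapshoting() once the write is done.
 */
static inline int example_nocow_write(struct btrfs_root *root)
{
        if (!btrfs_start_write_no_snapshoting(root))
                return -EAGAIN; /* snapshot pending, use the cow path */

        /* ... write data into the page cache via nocow here ... */

        btrfs_end_write_no_snapshoting(root);
        return 0;
}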