/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "math.h"
#include "sysfs.h"
#include "qgroup.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * control flags for do_chunk_alloc's force field
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};
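
/*
 * An illustrative call shape (the flags value here is hypothetical),
 * matching the do_chunk_alloc() declaration below:
 *
 *      ret = do_chunk_alloc(trans, extent_root, alloc_flags,
 *                           CHUNK_ALLOC_NO_FORCE);
 */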

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};
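
/*
 * Likewise, an illustrative (hypothetical) use of the reserve flags with
 * the btrfs_update_reserved_bytes() declaration below:
 *
 *      ret = btrfs_update_reserved_bytes(cache, num_bytes,
 *                                        RESERVE_ALLOC, 0);
 */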

static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root, u64 bytenr,
                              u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_delayed_ref_node *node, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve,
                                       int delalloc);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
                     u64 bytenr, u64 num_bytes, int reserved);
static int __reserve_metadata_bytes(struct btrfs_root *root,
                                    struct btrfs_space_info *space_info,
                                    u64 orig_bytes,
                                    enum btrfs_reserve_flush_enum flush);
static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
                                     struct btrfs_space_info *space_info,
                                     u64 num_bytes);
static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
                                     struct btrfs_space_info *space_info,
                                     u64 num_bytes);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED ||
                cache->cached == BTRFS_CACHE_ERROR;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
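/*
 * For example (hypothetical layout): with block groups [0, 1G) and [1G, 2G),
 * searching for bytenr 512M returns [0, 1G) when contains is 1, but the next
 * group [1G, 2G) when contains is 0, since only groups starting at or after
 * bytenr qualify in that mode.
 */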
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE);
}

static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                if (ret)
                        return ret;
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                if (ret)
                        return ret;

                while (nr--) {
                        u64 start, len;

                        if (logical[nr] > cache->key.objectid +
                            cache->key.offset)
                                continue;

                        if (logical[nr] + stripe_len <= cache->key.objectid)
                                continue;

                        start = logical[nr];
                        if (start < cache->key.objectid) {
                                start = cache->key.objectid;
                                len = (logical[nr] + stripe_len) - start;
                        } else {
                                len = min_t(u64, stripe_len,
                                            cache->key.objectid +
                                            cache->key.offset - start);
                        }

                        cache->bytes_super += len;
                        ret = add_excluded_extent(root, start, len);
                        if (ret) {
                                kfree(logical);
                                return ret;
                        }
                }

                kfree(logical);
        }
        return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_root *root,
                                struct btrfs_block_group_cache *block_group)
{
        u64 start = block_group->key.objectid;
        u64 len = block_group->key.offset;
        u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
                root->nodesize : root->sectorsize;
        u64 step = chunk << 1;

        while (len > chunk) {
                btrfs_remove_free_space(block_group, start, chunk);
                start += step;
                if (len < step)
                        len = 0;
                else
                        len -= step;
        }
}
#endif

/*
 * This is only called while caching a block group; since we could have freed
 * extents, we need to check the pinned_extents tree for any extents that
 * can't be used yet, since their free space will be released as soon as the
 * transaction commits.
 */
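/*
 * For example (hypothetical ranges): when caching [0, 1G) and a pinned
 * extent covers [X, Y] inside it, only [0, X) and the tail after Y are
 * added as free space, and the sum of their sizes is returned.
 */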
u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                       struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,
                                            NULL);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
        }

        return total_added;
}

static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret;
        bool wakeup = true;

        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

#ifdef CONFIG_BTRFS_DEBUG
        /*
         * If we're fragmenting we don't want to make anybody think we can
         * allocate from this block group until we've had a chance to fragment
         * the free space.
         */
        if (btrfs_should_fragment_free_space(extent_root, block_group))
                wakeup = false;
#endif
        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = READA_FORWARD;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;

next:
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto out;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched() ||
                            rwsem_is_contended(&fs_info->commit_root_sem)) {
                                if (wakeup)
                                        caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->commit_root_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                mutex_lock(&caching_ctl->mutex);
                                down_read(&fs_info->commit_root_sem);
                                goto next;
                        }

                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret < 0)
                                goto out;
                        if (ret)
                                break;
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < last) {
                        key.objectid = last;
                        key.offset = 0;
                        key.type = BTRFS_EXTENT_ITEM_KEY;

                        if (wakeup)
                                caching_ctl->progress = last;
                        btrfs_release_path(path);
                        goto next;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY ||
                    key.type == BTRFS_METADATA_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        if (key.type == BTRFS_METADATA_ITEM_KEY)
                                last = key.objectid +
                                        fs_info->tree_root->nodesize;
                        else
                                last = key.objectid + key.offset;

                        if (total_found > CACHING_CTL_WAKE_UP) {
                                total_found = 0;
                                if (wakeup)
                                        wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

out:
        btrfs_free_path(path);
        return ret;
}

static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        int ret;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        mutex_lock(&caching_ctl->mutex);
        down_read(&fs_info->commit_root_sem);

        if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
                ret = load_free_space_tree(caching_ctl);
        else
                ret = load_extent_tree_free(caching_ctl);

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
        if (btrfs_should_fragment_free_space(extent_root, block_group)) {
                u64 bytes_used;

                spin_lock(&block_group->space_info->lock);
                spin_lock(&block_group->lock);
                bytes_used = block_group->key.offset -
                        btrfs_block_group_used(&block_group->item);
                block_group->space_info->bytes_used += bytes_used >> 1;
                spin_unlock(&block_group->lock);
                spin_unlock(&block_group->space_info->lock);
                fragment_free_space(extent_root, block_group);
        }
#endif

        caching_ctl->progress = (u64)-1;

        up_read(&fs_info->commit_root_sem);
        free_excluded_extents(fs_info->extent_root, block_group);
        mutex_unlock(&caching_ctl->mutex);

        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        if (!caching_ctl)
                return -ENOMEM;

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
                        caching_thread, NULL, NULL);

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but it can happen in the case
         * where one thread starts to load the space cache info, and then
         * some other thread starts a transaction commit which tries to do an
         * allocation while the other thread is still loading the space cache
         * info.  The previous loop should have kept us from choosing this
         * block group, but if we've moved to the state where we will wait on
         * caching block groups we need to first check if we're doing a fast
         * load here, so we can wait for it to finish, otherwise we could end
         * up allocating from a block group whose cache gets evicted for one
         * reason or another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                mutex_lock(&caching_ctl->mutex);
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                        caching_ctl->progress = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                                cache->has_caching_ctl = 1;
                        }
                }
                spin_unlock(&cache->lock);
#ifdef CONFIG_BTRFS_DEBUG
                if (ret == 1 &&
                    btrfs_should_fragment_free_space(fs_info->extent_root,
                                                     cache)) {
                        u64 bytes_used;

                        spin_lock(&cache->space_info->lock);
                        spin_lock(&cache->lock);
                        bytes_used = cache->key.offset -
                                btrfs_block_group_used(&cache->item);
                        cache->space_info->bytes_used += bytes_used >> 1;
                        spin_unlock(&cache->lock);
                        spin_unlock(&cache->space_info->lock);
                        fragment_free_space(fs_info->extent_root, cache);
                }
#endif
                mutex_unlock(&caching_ctl->mutex);

                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We're either using the free space tree or no caching at all.
                 * Set cached to the appropriate value and wakeup any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                        cache->has_caching_ctl = 1;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->commit_root_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->commit_root_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

        return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}

/*
 * helper function to lookup reference count and flags of a tree block.
 *
 * the delayed ref head node is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * what the reference count and extent flags would be as if all of
 * the delayed refs had already been processed.
 */
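/*
 * A sketch of a typical call (variable names hypothetical): look up the
 * refcount and flags of a metadata block, folding in queued delayed refs:
 *
 *      ret = btrfs_lookup_extent_info(trans, root, eb->start,
 *                                     btrfs_header_level(eb), 1,
 *                                     &refs, &flags);
 */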
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 offset, int metadata, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        /*
         * If we don't have skinny metadata, don't bother doing anything
         * different
         */
        if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
                offset = root->nodesize;
                metadata = 0;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }

search_again:
        key.objectid = bytenr;
        key.offset = offset;
        if (metadata)
                key.type = BTRFS_METADATA_ITEM_KEY;
        else
                key.type = BTRFS_EXTENT_ITEM_KEY;

        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
                if (path->slots[0]) {
                        path->slots[0]--;
                        btrfs_item_key_to_cpu(path->nodes[0], &key,
                                              path->slots[0]);
                        if (key.objectid == bytenr &&
                            key.type == BTRFS_EXTENT_ITEM_KEY &&
                            key.offset == root->nodesize)
                                ret = 0;
                }
        }

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and try
                         * again
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto search_again;
                }
                spin_lock(&head->lock);
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                spin_unlock(&head->lock);
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) Differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. Implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. Full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is recorded
 * in the back refs. Full back refs are actually generic, and can be
 * used in all cases where implicit back refs are used. The major shortcoming
 * of full back refs is their overhead. Every time a tree block gets
 * COWed, we have to update the back ref entries for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back reference key composition:
 *
 * The key objectid corresponds to the first byte in the extent;
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is a hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, implicit back refs are used and the
 * fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * consist only of a key. The key offset for the implicit back refs is the
 * objectid of the block's owner tree. The key offset for the full back refs
 * is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required. This information is stored in
 * the tree block info structure.
 */
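
/*
 * A sketch of the resulting on-disk keys (values hypothetical):
 *
 *	implicit data back ref:
 *	   (extent bytenr, BTRFS_EXTENT_DATA_REF_KEY,
 *	    hash of (root objectid, inode objectid, file offset))
 *	full/shared data back ref:
 *	   (extent bytenr, BTRFS_SHARED_DATA_REF_KEY, leaf bytenr)
 *	implicit tree back ref:
 *	   (block bytenr, BTRFS_TREE_BLOCK_REF_KEY, owner root objectid)
 *	full/shared tree back ref:
 *	   (block bytenr, BTRFS_SHARED_BLOCK_REF_KEY, parent block bytenr)
 */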

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0); /* Corruption */
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret); /* Corruption */

        btrfs_extend_item(root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

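/*
 * The helper below hashes the (root, objectid, offset) triple described
 * above: each field is fed through crc32c and the two accumulators are
 * folded into a single 64-bit key offset as (high_crc << 31) ^ low_crc.
 */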
1149 static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
1150 {
1151         u32 high_crc = ~(u32)0;
1152         u32 low_crc = ~(u32)0;
1153         __le64 lenum;
1154
1155         lenum = cpu_to_le64(root_objectid);
1156         high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
1157         lenum = cpu_to_le64(owner);
1158         low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1159         lenum = cpu_to_le64(offset);
1160         low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1161
1162         return ((u64)high_crc << 31) ^ (u64)low_crc;
1163 }
1164
1165 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1166                                      struct btrfs_extent_data_ref *ref)
1167 {
1168         return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1169                                     btrfs_extent_data_ref_objectid(leaf, ref),
1170                                     btrfs_extent_data_ref_offset(leaf, ref));
1171 }
1172
1173 static int match_extent_data_ref(struct extent_buffer *leaf,
1174                                  struct btrfs_extent_data_ref *ref,
1175                                  u64 root_objectid, u64 owner, u64 offset)
1176 {
1177         if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1178             btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1179             btrfs_extent_data_ref_offset(leaf, ref) != offset)
1180                 return 0;
1181         return 1;
1182 }
1183
1184 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1185                                            struct btrfs_root *root,
1186                                            struct btrfs_path *path,
1187                                            u64 bytenr, u64 parent,
1188                                            u64 root_objectid,
1189                                            u64 owner, u64 offset)
1190 {
1191         struct btrfs_key key;
1192         struct btrfs_extent_data_ref *ref;
1193         struct extent_buffer *leaf;
1194         u32 nritems;
1195         int ret;
1196         int recow;
1197         int err = -ENOENT;
1198
1199         key.objectid = bytenr;
1200         if (parent) {
1201                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1202                 key.offset = parent;
1203         } else {
1204                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1205                 key.offset = hash_extent_data_ref(root_objectid,
1206                                                   owner, offset);
1207         }
1208 again:
1209         recow = 0;
1210         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1211         if (ret < 0) {
1212                 err = ret;
1213                 goto fail;
1214         }
1215
1216         if (parent) {
1217                 if (!ret)
1218                         return 0;
1219 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1220                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1221                 btrfs_release_path(path);
1222                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1223                 if (ret < 0) {
1224                         err = ret;
1225                         goto fail;
1226                 }
1227                 if (!ret)
1228                         return 0;
1229 #endif
1230                 goto fail;
1231         }
1232
1233         leaf = path->nodes[0];
1234         nritems = btrfs_header_nritems(leaf);
1235         while (1) {
1236                 if (path->slots[0] >= nritems) {
1237                         ret = btrfs_next_leaf(root, path);
1238                         if (ret < 0)
1239                                 err = ret;
1240                         if (ret)
1241                                 goto fail;
1242
1243                         leaf = path->nodes[0];
1244                         nritems = btrfs_header_nritems(leaf);
1245                         recow = 1;
1246                 }
1247
1248                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1249                 if (key.objectid != bytenr ||
1250                     key.type != BTRFS_EXTENT_DATA_REF_KEY)
1251                         goto fail;
1252
1253                 ref = btrfs_item_ptr(leaf, path->slots[0],
1254                                      struct btrfs_extent_data_ref);
1255
1256                 if (match_extent_data_ref(leaf, ref, root_objectid,
1257                                           owner, offset)) {
1258                         if (recow) {
1259                                 btrfs_release_path(path);
1260                                 goto again;
1261                         }
1262                         err = 0;
1263                         break;
1264                 }
1265                 path->slots[0]++;
1266         }
1267 fail:
1268         return err;
1269 }
1270
1271 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1272                                            struct btrfs_root *root,
1273                                            struct btrfs_path *path,
1274                                            u64 bytenr, u64 parent,
1275                                            u64 root_objectid, u64 owner,
1276                                            u64 offset, int refs_to_add)
1277 {
1278         struct btrfs_key key;
1279         struct extent_buffer *leaf;
1280         u32 size;
1281         u32 num_refs;
1282         int ret;
1283
1284         key.objectid = bytenr;
1285         if (parent) {
1286                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1287                 key.offset = parent;
1288                 size = sizeof(struct btrfs_shared_data_ref);
1289         } else {
1290                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1291                 key.offset = hash_extent_data_ref(root_objectid,
1292                                                   owner, offset);
1293                 size = sizeof(struct btrfs_extent_data_ref);
1294         }
1295
1296         ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1297         if (ret && ret != -EEXIST)
1298                 goto fail;
1299
1300         leaf = path->nodes[0];
1301         if (parent) {
1302                 struct btrfs_shared_data_ref *ref;
1303                 ref = btrfs_item_ptr(leaf, path->slots[0],
1304                                      struct btrfs_shared_data_ref);
1305                 if (ret == 0) {
1306                         btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1307                 } else {
1308                         num_refs = btrfs_shared_data_ref_count(leaf, ref);
1309                         num_refs += refs_to_add;
1310                         btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1311                 }
1312         } else {
1313                 struct btrfs_extent_data_ref *ref;
1314                 while (ret == -EEXIST) {
1315                         ref = btrfs_item_ptr(leaf, path->slots[0],
1316                                              struct btrfs_extent_data_ref);
1317                         if (match_extent_data_ref(leaf, ref, root_objectid,
1318                                                   owner, offset))
1319                                 break;
1320                         btrfs_release_path(path);
1321                         key.offset++;
1322                         ret = btrfs_insert_empty_item(trans, root, path, &key,
1323                                                       size);
1324                         if (ret && ret != -EEXIST)
1325                                 goto fail;
1326
1327                         leaf = path->nodes[0];
1328                 }
1329                 ref = btrfs_item_ptr(leaf, path->slots[0],
1330                                      struct btrfs_extent_data_ref);
1331                 if (ret == 0) {
1332                         btrfs_set_extent_data_ref_root(leaf, ref,
1333                                                        root_objectid);
1334                         btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1335                         btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1336                         btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1337                 } else {
1338                         num_refs = btrfs_extent_data_ref_count(leaf, ref);
1339                         num_refs += refs_to_add;
1340                         btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1341                 }
1342         }
1343         btrfs_mark_buffer_dirty(leaf);
1344         ret = 0;
1345 fail:
1346         btrfs_release_path(path);
1347         return ret;
1348 }
1349
1350 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1351                                            struct btrfs_root *root,
1352                                            struct btrfs_path *path,
1353                                            int refs_to_drop, int *last_ref)
1354 {
1355         struct btrfs_key key;
1356         struct btrfs_extent_data_ref *ref1 = NULL;
1357         struct btrfs_shared_data_ref *ref2 = NULL;
1358         struct extent_buffer *leaf;
1359         u32 num_refs = 0;
1360         int ret = 0;
1361
1362         leaf = path->nodes[0];
1363         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1364
1365         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1366                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1367                                       struct btrfs_extent_data_ref);
1368                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1369         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1370                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1371                                       struct btrfs_shared_data_ref);
1372                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1373 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1374         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1375                 struct btrfs_extent_ref_v0 *ref0;
1376                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1377                                       struct btrfs_extent_ref_v0);
1378                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1379 #endif
1380         } else {
1381                 BUG();
1382         }
1383
1384         BUG_ON(num_refs < refs_to_drop);
1385         num_refs -= refs_to_drop;
1386
1387         if (num_refs == 0) {
1388                 ret = btrfs_del_item(trans, root, path);
1389                 *last_ref = 1;
1390         } else {
1391                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1392                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1393                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1394                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1395 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1396                 else {
1397                         struct btrfs_extent_ref_v0 *ref0;
1398                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1399                                         struct btrfs_extent_ref_v0);
1400                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1401                 }
1402 #endif
1403                 btrfs_mark_buffer_dirty(leaf);
1404         }
1405         return ret;
1406 }
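
/*
 * A self-contained sketch of the refcount pattern above: drop N references,
 * delete the item only when the count reaches zero, and report through
 * *last_ref that the final reference went away so the caller can free the
 * extent itself.  toy_item is a made-up stand-in for the on-disk ref item.
 */
#if 0
#include <assert.h>

struct toy_item { unsigned int refs; int present; };

static int toy_drop_refs(struct toy_item *item, unsigned int refs_to_drop,
                         int *last_ref)
{
        assert(item->refs >= refs_to_drop);     /* mirrors the BUG_ON() */
        item->refs -= refs_to_drop;
        if (item->refs == 0) {
                item->present = 0;              /* btrfs_del_item() */
                *last_ref = 1;
        }
        return 0;
}
#endif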
1407
1408 static noinline u32 extent_data_ref_count(struct btrfs_path *path,
1409                                           struct btrfs_extent_inline_ref *iref)
1410 {
1411         struct btrfs_key key;
1412         struct extent_buffer *leaf;
1413         struct btrfs_extent_data_ref *ref1;
1414         struct btrfs_shared_data_ref *ref2;
1415         u32 num_refs = 0;
1416
1417         leaf = path->nodes[0];
1418         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1419         if (iref) {
1420                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1421                     BTRFS_EXTENT_DATA_REF_KEY) {
1422                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1423                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1424                 } else {
1425                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1426                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1427                 }
1428         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1429                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1430                                       struct btrfs_extent_data_ref);
1431                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1432         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1433                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1434                                       struct btrfs_shared_data_ref);
1435                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1436 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1437         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1438                 struct btrfs_extent_ref_v0 *ref0;
1439                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1440                                       struct btrfs_extent_ref_v0);
1441                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1442 #endif
1443         } else {
1444                 WARN_ON(1);
1445         }
1446         return num_refs;
1447 }
1448
1449 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1450                                           struct btrfs_root *root,
1451                                           struct btrfs_path *path,
1452                                           u64 bytenr, u64 parent,
1453                                           u64 root_objectid)
1454 {
1455         struct btrfs_key key;
1456         int ret;
1457
1458         key.objectid = bytenr;
1459         if (parent) {
1460                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1461                 key.offset = parent;
1462         } else {
1463                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1464                 key.offset = root_objectid;
1465         }
1466
1467         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1468         if (ret > 0)
1469                 ret = -ENOENT;
1470 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1471         if (ret == -ENOENT && parent) {
1472                 btrfs_release_path(path);
1473                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1474                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1475                 if (ret > 0)
1476                         ret = -ENOENT;
1477         }
1478 #endif
1479         return ret;
1480 }
1481
1482 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1483                                           struct btrfs_root *root,
1484                                           struct btrfs_path *path,
1485                                           u64 bytenr, u64 parent,
1486                                           u64 root_objectid)
1487 {
1488         struct btrfs_key key;
1489         int ret;
1490
1491         key.objectid = bytenr;
1492         if (parent) {
1493                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1494                 key.offset = parent;
1495         } else {
1496                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1497                 key.offset = root_objectid;
1498         }
1499
1500         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1501         btrfs_release_path(path);
1502         return ret;
1503 }
1504
1505 static inline int extent_ref_type(u64 parent, u64 owner)
1506 {
1507         int type;
1508         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1509                 if (parent > 0)
1510                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1511                 else
1512                         type = BTRFS_TREE_BLOCK_REF_KEY;
1513         } else {
1514                 if (parent > 0)
1515                         type = BTRFS_SHARED_DATA_REF_KEY;
1516                 else
1517                         type = BTRFS_EXTENT_DATA_REF_KEY;
1518         }
1519         return type;
1520 }
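
/*
 * At a glance, the mapping implemented by extent_ref_type() above (owners
 * below BTRFS_FIRST_FREE_OBJECTID are metadata):
 *
 *   parent  owner      resulting backref key type
 *   ------  --------   ---------------------------
 *      0    metadata   BTRFS_TREE_BLOCK_REF_KEY
 *    > 0    metadata   BTRFS_SHARED_BLOCK_REF_KEY
 *      0    data       BTRFS_EXTENT_DATA_REF_KEY
 *    > 0    data       BTRFS_SHARED_DATA_REF_KEY
 */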
1521
1522 static int find_next_key(struct btrfs_path *path, int level,
1523                          struct btrfs_key *key)
1525 {
1526         for (; level < BTRFS_MAX_LEVEL; level++) {
1527                 if (!path->nodes[level])
1528                         break;
1529                 if (path->slots[level] + 1 >=
1530                     btrfs_header_nritems(path->nodes[level]))
1531                         continue;
1532                 if (level == 0)
1533                         btrfs_item_key_to_cpu(path->nodes[level], key,
1534                                               path->slots[level] + 1);
1535                 else
1536                         btrfs_node_key_to_cpu(path->nodes[level], key,
1537                                               path->slots[level] + 1);
1538                 return 0;
1539         }
1540         return 1;
1541 }
1542
1543 /*
1544  * Look for an inline back ref.  If the back ref is found, *ref_ret is
1545  * set to the address of the inline back ref, and 0 is returned.
1546  *
1547  * If the back ref isn't found, *ref_ret is set to the address where it
1548  * should be inserted, and -ENOENT is returned.
1549  *
1550  * If insert is true and there are too many inline back refs, the path
1551  * points to the extent item, and -EAGAIN is returned.
1552  *
1553  * NOTE: inline back refs are ordered in the same way that back ref
1554  *       items in the tree are ordered.
1555  */
1556 static noinline_for_stack
1557 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1558                                  struct btrfs_root *root,
1559                                  struct btrfs_path *path,
1560                                  struct btrfs_extent_inline_ref **ref_ret,
1561                                  u64 bytenr, u64 num_bytes,
1562                                  u64 parent, u64 root_objectid,
1563                                  u64 owner, u64 offset, int insert)
1564 {
1565         struct btrfs_key key;
1566         struct extent_buffer *leaf;
1567         struct btrfs_extent_item *ei;
1568         struct btrfs_extent_inline_ref *iref;
1569         u64 flags;
1570         u64 item_size;
1571         unsigned long ptr;
1572         unsigned long end;
1573         int extra_size;
1574         int type;
1575         int want;
1576         int ret;
1577         int err = 0;
1578         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1579                                                  SKINNY_METADATA);
1580
1581         key.objectid = bytenr;
1582         key.type = BTRFS_EXTENT_ITEM_KEY;
1583         key.offset = num_bytes;
1584
1585         want = extent_ref_type(parent, owner);
1586         if (insert) {
1587                 extra_size = btrfs_extent_inline_ref_size(want);
1588                 path->keep_locks = 1;
1589         } else
1590                 extra_size = -1;
1591
1592         /*
1593          * For metadata extents the owner is the level of the block, which
1594          * is exactly the offset used by a skinny metadata key.
1595          */
1596         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1597                 key.type = BTRFS_METADATA_ITEM_KEY;
1598                 key.offset = owner;
1599         }
1600
1601 again:
1602         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1603         if (ret < 0) {
1604                 err = ret;
1605                 goto out;
1606         }
1607
1608         /*
1609          * This may be a newly converted file system which still has the old
1610          * fat extent entries for metadata, so try to see if we have one of those.
1611          */
1612         if (ret > 0 && skinny_metadata) {
1613                 skinny_metadata = false;
1614                 if (path->slots[0]) {
1615                         path->slots[0]--;
1616                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1617                                               path->slots[0]);
1618                         if (key.objectid == bytenr &&
1619                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1620                             key.offset == num_bytes)
1621                                 ret = 0;
1622                 }
1623                 if (ret) {
1624                         key.objectid = bytenr;
1625                         key.type = BTRFS_EXTENT_ITEM_KEY;
1626                         key.offset = num_bytes;
1627                         btrfs_release_path(path);
1628                         goto again;
1629                 }
1630         }
1631
1632         if (ret && !insert) {
1633                 err = -ENOENT;
1634                 goto out;
1635         } else if (WARN_ON(ret)) {
1636                 err = -EIO;
1637                 goto out;
1638         }
1639
1640         leaf = path->nodes[0];
1641         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1642 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1643         if (item_size < sizeof(*ei)) {
1644                 if (!insert) {
1645                         err = -ENOENT;
1646                         goto out;
1647                 }
1648                 ret = convert_extent_item_v0(trans, root, path, owner,
1649                                              extra_size);
1650                 if (ret < 0) {
1651                         err = ret;
1652                         goto out;
1653                 }
1654                 leaf = path->nodes[0];
1655                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1656         }
1657 #endif
1658         BUG_ON(item_size < sizeof(*ei));
1659
1660         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1661         flags = btrfs_extent_flags(leaf, ei);
1662
1663         ptr = (unsigned long)(ei + 1);
1664         end = (unsigned long)ei + item_size;
1665
1666         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1667                 ptr += sizeof(struct btrfs_tree_block_info);
1668                 BUG_ON(ptr > end);
1669         }
1670
1671         err = -ENOENT;
1672         while (1) {
1673                 if (ptr >= end) {
1674                         WARN_ON(ptr > end);
1675                         break;
1676                 }
1677                 iref = (struct btrfs_extent_inline_ref *)ptr;
1678                 type = btrfs_extent_inline_ref_type(leaf, iref);
1679                 if (want < type)
1680                         break;
1681                 if (want > type) {
1682                         ptr += btrfs_extent_inline_ref_size(type);
1683                         continue;
1684                 }
1685
1686                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1687                         struct btrfs_extent_data_ref *dref;
1688                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1689                         if (match_extent_data_ref(leaf, dref, root_objectid,
1690                                                   owner, offset)) {
1691                                 err = 0;
1692                                 break;
1693                         }
1694                         if (hash_extent_data_ref_item(leaf, dref) <
1695                             hash_extent_data_ref(root_objectid, owner, offset))
1696                                 break;
1697                 } else {
1698                         u64 ref_offset;
1699                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1700                         if (parent > 0) {
1701                                 if (parent == ref_offset) {
1702                                         err = 0;
1703                                         break;
1704                                 }
1705                                 if (ref_offset < parent)
1706                                         break;
1707                         } else {
1708                                 if (root_objectid == ref_offset) {
1709                                         err = 0;
1710                                         break;
1711                                 }
1712                                 if (ref_offset < root_objectid)
1713                                         break;
1714                         }
1715                 }
1716                 ptr += btrfs_extent_inline_ref_size(type);
1717         }
1718         if (err == -ENOENT && insert) {
1719                 if (item_size + extra_size >=
1720                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1721                         err = -EAGAIN;
1722                         goto out;
1723                 }
1724                 /*
1725                  * To add a new inline back ref, we have to make sure
1726                  * there is no corresponding back ref item.
1727                  * For simplicity, we just do not add a new inline back
1728                  * ref if there is any kind of item for this block.
1729                  */
1730                 if (find_next_key(path, 0, &key) == 0 &&
1731                     key.objectid == bytenr &&
1732                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1733                         err = -EAGAIN;
1734                         goto out;
1735                 }
1736         }
1737         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1738 out:
1739         if (insert) {
1740                 path->keep_locks = 0;
1741                 btrfs_unlock_up_safe(path, 1);
1742         }
1743         return err;
1744 }
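
/*
 * A self-contained sketch of the inline ref walk above: the refs live back
 * to back after the extent item, each record is a type byte plus a
 * type-dependent payload, and records are sorted by type so the scan can
 * stop as soon as it passes the wanted type.  The sizes here are made up;
 * the real ones come from btrfs_extent_inline_ref_size().
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static size_t toy_ref_size(uint8_t type)
{
        return type & 1 ? 9 : 17;       /* made-up record sizes */
}

/* Return the offset of the first record with type >= want, or len. */
static size_t toy_find_ref(const uint8_t *buf, size_t len, uint8_t want)
{
        size_t ptr = 0;

        while (ptr < len) {
                uint8_t type = buf[ptr];

                if (type >= want)
                        break;          /* sorted: match or insert point */
                ptr += toy_ref_size(type);
        }
        return ptr;
}
#endif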
1745
1746 /*
1747  * helper to add new inline back ref
1748  */
1749 static noinline_for_stack
1750 void setup_inline_extent_backref(struct btrfs_root *root,
1751                                  struct btrfs_path *path,
1752                                  struct btrfs_extent_inline_ref *iref,
1753                                  u64 parent, u64 root_objectid,
1754                                  u64 owner, u64 offset, int refs_to_add,
1755                                  struct btrfs_delayed_extent_op *extent_op)
1756 {
1757         struct extent_buffer *leaf;
1758         struct btrfs_extent_item *ei;
1759         unsigned long ptr;
1760         unsigned long end;
1761         unsigned long item_offset;
1762         u64 refs;
1763         int size;
1764         int type;
1765
1766         leaf = path->nodes[0];
1767         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1768         item_offset = (unsigned long)iref - (unsigned long)ei;
1769
1770         type = extent_ref_type(parent, owner);
1771         size = btrfs_extent_inline_ref_size(type);
1772
1773         btrfs_extend_item(root, path, size);
1774
1775         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1776         refs = btrfs_extent_refs(leaf, ei);
1777         refs += refs_to_add;
1778         btrfs_set_extent_refs(leaf, ei, refs);
1779         if (extent_op)
1780                 __run_delayed_extent_op(extent_op, leaf, ei);
1781
1782         ptr = (unsigned long)ei + item_offset;
1783         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1784         if (ptr < end - size)
1785                 memmove_extent_buffer(leaf, ptr + size, ptr,
1786                                       end - size - ptr);
1787
1788         iref = (struct btrfs_extent_inline_ref *)ptr;
1789         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1790         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1791                 struct btrfs_extent_data_ref *dref;
1792                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1793                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1794                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1795                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1796                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1797         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1798                 struct btrfs_shared_data_ref *sref;
1799                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1800                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1801                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1802         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1803                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1804         } else {
1805                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1806         }
1807         btrfs_mark_buffer_dirty(leaf);
1808 }
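
/*
 * A self-contained sketch of the byte shuffling done above and in
 * update_inline_extent_backref() below: growing an item opens a gap with a
 * tail-preserving memmove(), shrinking closes one before the item is
 * truncated.  For toy_open_gap(), 'end' is the item size after growing;
 * for toy_close_gap() it is the size before truncating; callers must
 * ensure end >= size.
 */
#if 0
#include <string.h>

/* Open a 'size'-byte gap at offset 'ptr' inside buf[0..end). */
static void toy_open_gap(char *buf, size_t end, size_t ptr, size_t size)
{
        if (ptr < end - size)
                memmove(buf + ptr + size, buf + ptr, end - size - ptr);
}

/* Close a 'size'-byte gap at offset 'ptr' inside buf[0..end). */
static void toy_close_gap(char *buf, size_t end, size_t ptr, size_t size)
{
        if (ptr + size < end)
                memmove(buf + ptr, buf + ptr + size, end - ptr - size);
}
#endif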
1809
1810 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1811                                  struct btrfs_root *root,
1812                                  struct btrfs_path *path,
1813                                  struct btrfs_extent_inline_ref **ref_ret,
1814                                  u64 bytenr, u64 num_bytes, u64 parent,
1815                                  u64 root_objectid, u64 owner, u64 offset)
1816 {
1817         int ret;
1818
1819         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1820                                            bytenr, num_bytes, parent,
1821                                            root_objectid, owner, offset, 0);
1822         if (ret != -ENOENT)
1823                 return ret;
1824
1825         btrfs_release_path(path);
1826         *ref_ret = NULL;
1827
1828         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1829                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1830                                             root_objectid);
1831         } else {
1832                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1833                                              root_objectid, owner, offset);
1834         }
1835         return ret;
1836 }
1837
1838 /*
1839  * helper to update/remove inline back ref
1840  */
1841 static noinline_for_stack
1842 void update_inline_extent_backref(struct btrfs_root *root,
1843                                   struct btrfs_path *path,
1844                                   struct btrfs_extent_inline_ref *iref,
1845                                   int refs_to_mod,
1846                                   struct btrfs_delayed_extent_op *extent_op,
1847                                   int *last_ref)
1848 {
1849         struct extent_buffer *leaf;
1850         struct btrfs_extent_item *ei;
1851         struct btrfs_extent_data_ref *dref = NULL;
1852         struct btrfs_shared_data_ref *sref = NULL;
1853         unsigned long ptr;
1854         unsigned long end;
1855         u32 item_size;
1856         int size;
1857         int type;
1858         u64 refs;
1859
1860         leaf = path->nodes[0];
1861         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1862         refs = btrfs_extent_refs(leaf, ei);
1863         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1864         refs += refs_to_mod;
1865         btrfs_set_extent_refs(leaf, ei, refs);
1866         if (extent_op)
1867                 __run_delayed_extent_op(extent_op, leaf, ei);
1868
1869         type = btrfs_extent_inline_ref_type(leaf, iref);
1870
1871         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1872                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1873                 refs = btrfs_extent_data_ref_count(leaf, dref);
1874         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1875                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1876                 refs = btrfs_shared_data_ref_count(leaf, sref);
1877         } else {
1878                 refs = 1;
1879                 BUG_ON(refs_to_mod != -1);
1880         }
1881
1882         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1883         refs += refs_to_mod;
1884
1885         if (refs > 0) {
1886                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1887                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1888                 else
1889                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1890         } else {
1891                 *last_ref = 1;
1892                 size =  btrfs_extent_inline_ref_size(type);
1893                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1894                 ptr = (unsigned long)iref;
1895                 end = (unsigned long)ei + item_size;
1896                 if (ptr + size < end)
1897                         memmove_extent_buffer(leaf, ptr, ptr + size,
1898                                               end - ptr - size);
1899                 item_size -= size;
1900                 btrfs_truncate_item(root, path, item_size, 1);
1901         }
1902         btrfs_mark_buffer_dirty(leaf);
1903 }
1904
1905 static noinline_for_stack
1906 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1907                                  struct btrfs_root *root,
1908                                  struct btrfs_path *path,
1909                                  u64 bytenr, u64 num_bytes, u64 parent,
1910                                  u64 root_objectid, u64 owner,
1911                                  u64 offset, int refs_to_add,
1912                                  struct btrfs_delayed_extent_op *extent_op)
1913 {
1914         struct btrfs_extent_inline_ref *iref;
1915         int ret;
1916
1917         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1918                                            bytenr, num_bytes, parent,
1919                                            root_objectid, owner, offset, 1);
1920         if (ret == 0) {
1921                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1922                 update_inline_extent_backref(root, path, iref,
1923                                              refs_to_add, extent_op, NULL);
1924         } else if (ret == -ENOENT) {
1925                 setup_inline_extent_backref(root, path, iref, parent,
1926                                             root_objectid, owner, offset,
1927                                             refs_to_add, extent_op);
1928                 ret = 0;
1929         }
1930         return ret;
1931 }
1932
1933 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1934                                  struct btrfs_root *root,
1935                                  struct btrfs_path *path,
1936                                  u64 bytenr, u64 parent, u64 root_objectid,
1937                                  u64 owner, u64 offset, int refs_to_add)
1938 {
1939         int ret;
1940         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1941                 BUG_ON(refs_to_add != 1);
1942                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1943                                             parent, root_objectid);
1944         } else {
1945                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1946                                              parent, root_objectid,
1947                                              owner, offset, refs_to_add);
1948         }
1949         return ret;
1950 }
1951
1952 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1953                                  struct btrfs_root *root,
1954                                  struct btrfs_path *path,
1955                                  struct btrfs_extent_inline_ref *iref,
1956                                  int refs_to_drop, int is_data, int *last_ref)
1957 {
1958         int ret = 0;
1959
1960         BUG_ON(!is_data && refs_to_drop != 1);
1961         if (iref) {
1962                 update_inline_extent_backref(root, path, iref,
1963                                              -refs_to_drop, NULL, last_ref);
1964         } else if (is_data) {
1965                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
1966                                              last_ref);
1967         } else {
1968                 *last_ref = 1;
1969                 ret = btrfs_del_item(trans, root, path);
1970         }
1971         return ret;
1972 }
1973
1974 #define in_range(b, first, len)        ((b) >= (first) && (b) < (first) + (len))
1975 static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
1976                                u64 *discarded_bytes)
1977 {
1978         int j, ret = 0;
1979         u64 bytes_left, end;
1980         u64 aligned_start = ALIGN(start, 1 << 9);
1981
1982         if (WARN_ON(start != aligned_start)) {
1983                 len -= aligned_start - start;
1984                 len = round_down(len, 1 << 9);
1985                 start = aligned_start;
1986         }
1987
1988         *discarded_bytes = 0;
1989
1990         if (!len)
1991                 return 0;
1992
1993         end = start + len;
1994         bytes_left = len;
1995
1996         /* Skip any superblocks on this device. */
1997         for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
1998                 u64 sb_start = btrfs_sb_offset(j);
1999                 u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
2000                 u64 size = sb_start - start;
2001
2002                 if (!in_range(sb_start, start, bytes_left) &&
2003                     !in_range(sb_end, start, bytes_left) &&
2004                     !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
2005                         continue;
2006
2007                 /*
2008                  * Superblock spans beginning of range.  Adjust start and
2009                  * try again.
2010                  */
2011                 if (sb_start <= start) {
2012                         start += sb_end - start;
2013                         if (start > end) {
2014                                 bytes_left = 0;
2015                                 break;
2016                         }
2017                         bytes_left = end - start;
2018                         continue;
2019                 }
2020
2021                 if (size) {
2022                         ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
2023                                                    GFP_NOFS, 0);
2024                         if (!ret)
2025                                 *discarded_bytes += size;
2026                         else if (ret != -EOPNOTSUPP)
2027                                 return ret;
2028                 }
2029
2030                 start = sb_end;
2031                 if (start > end) {
2032                         bytes_left = 0;
2033                         break;
2034                 }
2035                 bytes_left = end - start;
2036         }
2037
2038         if (bytes_left) {
2039                 ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
2040                                            GFP_NOFS, 0);
2041                 if (!ret)
2042                         *discarded_bytes += bytes_left;
2043         }
2044         return ret;
2045 }
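
/*
 * A self-contained sketch of the clipping above: given [start, end) and a
 * reserved superblock window [sb_start, sb_end), emit the pieces outside
 * the window.  Offsets are shifted right by 9 because the block layer
 * discards in 512-byte sectors.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static void toy_discard_around(uint64_t start, uint64_t end,
                               uint64_t sb_start, uint64_t sb_end)
{
        uint64_t lo_end = sb_start < end ? sb_start : end;
        uint64_t hi_start = sb_end > start ? sb_end : start;

        if (start < lo_end)     /* piece before the window */
                printf("discard sectors [%llu, %llu)\n",
                       (unsigned long long)(start >> 9),
                       (unsigned long long)(lo_end >> 9));
        if (hi_start < end)     /* piece after the window */
                printf("discard sectors [%llu, %llu)\n",
                       (unsigned long long)(hi_start >> 9),
                       (unsigned long long)(end >> 9));
}
#endif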
2046
2047 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
2048                          u64 num_bytes, u64 *actual_bytes)
2049 {
2050         int ret;
2051         u64 discarded_bytes = 0;
2052         struct btrfs_bio *bbio = NULL;
2054
2055         /*
2056          * Avoid races with device replace and make sure our bbio has devices
2057          * associated with its stripes that don't go away while we are discarding.
2058          */
2059         btrfs_bio_counter_inc_blocked(root->fs_info);
2060         /* Tell the block device(s) that the sectors can be discarded */
2061         ret = btrfs_map_block(root->fs_info, REQ_OP_DISCARD,
2062                               bytenr, &num_bytes, &bbio, 0);
2063         /* Error condition is -ENOMEM */
2064         if (!ret) {
2065                 struct btrfs_bio_stripe *stripe = bbio->stripes;
2066                 int i;
2067
2069                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
2070                         u64 bytes;
2071                         if (!stripe->dev->can_discard)
2072                                 continue;
2073
2074                         ret = btrfs_issue_discard(stripe->dev->bdev,
2075                                                   stripe->physical,
2076                                                   stripe->length,
2077                                                   &bytes);
2078                         if (!ret)
2079                                 discarded_bytes += bytes;
2080                         else if (ret != -EOPNOTSUPP)
2081                                 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
2082
2083                         /*
2084                          * In case we get back EOPNOTSUPP for some reason,
2085                          * just ignore the return value so we don't break
2086                          * callers of discard_extent.
2087                          */
2088                         ret = 0;
2089                 }
2090                 btrfs_put_bbio(bbio);
2091         }
2092         btrfs_bio_counter_dec(root->fs_info);
2093
2094         if (actual_bytes)
2095                 *actual_bytes = discarded_bytes;
2096
2098         if (ret == -EOPNOTSUPP)
2099                 ret = 0;
2100         return ret;
2101 }
2102
2103 /* Can return -ENOMEM */
2104 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2105                          struct btrfs_root *root,
2106                          u64 bytenr, u64 num_bytes, u64 parent,
2107                          u64 root_objectid, u64 owner, u64 offset)
2108 {
2109         int ret;
2110         struct btrfs_fs_info *fs_info = root->fs_info;
2111
2112         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
2113                root_objectid == BTRFS_TREE_LOG_OBJECTID);
2114
2115         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
2116                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
2117                                         num_bytes,
2118                                         parent, root_objectid, (int)owner,
2119                                         BTRFS_ADD_DELAYED_REF, NULL);
2120         } else {
2121                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
2122                                         num_bytes, parent, root_objectid,
2123                                         owner, offset, 0,
2124                                         BTRFS_ADD_DELAYED_REF, NULL);
2125         }
2126         return ret;
2127 }
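
/*
 * A hypothetical usage sketch: when a file extent becomes shared, e.g. by
 * a clone, the new owner takes an extra delayed reference on the data
 * extent.  extent_bytenr, extent_num_bytes, dst_ino and dst_file_offset
 * are made-up names, not variables from this file.
 */
#if 0
        ret = btrfs_inc_extent_ref(trans, root,
                                   extent_bytenr,       /* extent start */
                                   extent_num_bytes,    /* extent length */
                                   0,                   /* no shared parent */
                                   root->root_key.objectid,
                                   dst_ino,             /* owning inode */
                                   dst_file_offset);    /* ref offset */
        if (ret)
                btrfs_abort_transaction(trans, root, ret);
#endif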
2128
2129 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2130                                   struct btrfs_root *root,
2131                                   struct btrfs_delayed_ref_node *node,
2132                                   u64 parent, u64 root_objectid,
2133                                   u64 owner, u64 offset, int refs_to_add,
2134                                   struct btrfs_delayed_extent_op *extent_op)
2135 {
2136         struct btrfs_fs_info *fs_info = root->fs_info;
2137         struct btrfs_path *path;
2138         struct extent_buffer *leaf;
2139         struct btrfs_extent_item *item;
2140         struct btrfs_key key;
2141         u64 bytenr = node->bytenr;
2142         u64 num_bytes = node->num_bytes;
2143         u64 refs;
2144         int ret;
2145
2146         path = btrfs_alloc_path();
2147         if (!path)
2148                 return -ENOMEM;
2149
2150         path->reada = READA_FORWARD;
2151         path->leave_spinning = 1;
2152         /* this will set up the path even if it fails to insert the back ref */
2153         ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
2154                                            bytenr, num_bytes, parent,
2155                                            root_objectid, owner, offset,
2156                                            refs_to_add, extent_op);
2157         if ((ret < 0 && ret != -EAGAIN) || !ret)
2158                 goto out;
2159
2160         /*
2161          * Ok we had -EAGAIN which means we didn't have space to insert an
2162          * inline extent ref, so just update the reference count and add a
2163          * normal backref.
2164          */
2165         leaf = path->nodes[0];
2166         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2167         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2168         refs = btrfs_extent_refs(leaf, item);
2169         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2170         if (extent_op)
2171                 __run_delayed_extent_op(extent_op, leaf, item);
2172
2173         btrfs_mark_buffer_dirty(leaf);
2174         btrfs_release_path(path);
2175
2176         path->reada = READA_FORWARD;
2177         path->leave_spinning = 1;
2178         /* now insert the actual backref */
2179         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2180                                     path, bytenr, parent, root_objectid,
2181                                     owner, offset, refs_to_add);
2182         if (ret)
2183                 btrfs_abort_transaction(trans, root, ret);
2184 out:
2185         btrfs_free_path(path);
2186         return ret;
2187 }
2188
2189 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2190                                 struct btrfs_root *root,
2191                                 struct btrfs_delayed_ref_node *node,
2192                                 struct btrfs_delayed_extent_op *extent_op,
2193                                 int insert_reserved)
2194 {
2195         int ret = 0;
2196         struct btrfs_delayed_data_ref *ref;
2197         struct btrfs_key ins;
2198         u64 parent = 0;
2199         u64 ref_root = 0;
2200         u64 flags = 0;
2201
2202         ins.objectid = node->bytenr;
2203         ins.offset = node->num_bytes;
2204         ins.type = BTRFS_EXTENT_ITEM_KEY;
2205
2206         ref = btrfs_delayed_node_to_data_ref(node);
2207         trace_run_delayed_data_ref(node, ref, node->action);
2208
2209         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2210                 parent = ref->parent;
2211         ref_root = ref->root;
2212
2213         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2214                 if (extent_op)
2215                         flags |= extent_op->flags_to_set;
2216                 ret = alloc_reserved_file_extent(trans, root,
2217                                                  parent, ref_root, flags,
2218                                                  ref->objectid, ref->offset,
2219                                                  &ins, node->ref_mod);
2220         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2221                 ret = __btrfs_inc_extent_ref(trans, root, node, parent,
2222                                              ref_root, ref->objectid,
2223                                              ref->offset, node->ref_mod,
2224                                              extent_op);
2225         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2226                 ret = __btrfs_free_extent(trans, root, node, parent,
2227                                           ref_root, ref->objectid,
2228                                           ref->offset, node->ref_mod,
2229                                           extent_op);
2230         } else {
2231                 BUG();
2232         }
2233         return ret;
2234 }
2235
2236 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2237                                     struct extent_buffer *leaf,
2238                                     struct btrfs_extent_item *ei)
2239 {
2240         u64 flags = btrfs_extent_flags(leaf, ei);
2241         if (extent_op->update_flags) {
2242                 flags |= extent_op->flags_to_set;
2243                 btrfs_set_extent_flags(leaf, ei, flags);
2244         }
2245
2246         if (extent_op->update_key) {
2247                 struct btrfs_tree_block_info *bi;
2248                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2249                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2250                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2251         }
2252 }
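
/*
 * A self-contained sketch of the deferred update applied above: the op
 * only ORs flags in and/or overwrites the block key, it never clears
 * flags.  toy_op and toy_item are made-up stand-ins for the delayed
 * extent op and the extent item.
 */
#if 0
#include <stdint.h>

struct toy_op   { int update_flags, update_key; uint64_t flags_to_set, key; };
struct toy_item { uint64_t flags, key; };

static void toy_run_op(const struct toy_op *op, struct toy_item *item)
{
        if (op->update_flags)
                item->flags |= op->flags_to_set;        /* OR, never clear */
        if (op->update_key)
                item->key = op->key;
}
#endif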
2253
2254 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2255                                  struct btrfs_root *root,
2256                                  struct btrfs_delayed_ref_node *node,
2257                                  struct btrfs_delayed_extent_op *extent_op)
2258 {
2259         struct btrfs_key key;
2260         struct btrfs_path *path;
2261         struct btrfs_extent_item *ei;
2262         struct extent_buffer *leaf;
2263         u32 item_size;
2264         int ret;
2265         int err = 0;
2266         int metadata = !extent_op->is_data;
2267
2268         if (trans->aborted)
2269                 return 0;
2270
2271         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2272                 metadata = 0;
2273
2274         path = btrfs_alloc_path();
2275         if (!path)
2276                 return -ENOMEM;
2277
2278         key.objectid = node->bytenr;
2279
2280         if (metadata) {
2281                 key.type = BTRFS_METADATA_ITEM_KEY;
2282                 key.offset = extent_op->level;
2283         } else {
2284                 key.type = BTRFS_EXTENT_ITEM_KEY;
2285                 key.offset = node->num_bytes;
2286         }
2287
2288 again:
2289         path->reada = READA_FORWARD;
2290         path->leave_spinning = 1;
2291         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2292                                 path, 0, 1);
2293         if (ret < 0) {
2294                 err = ret;
2295                 goto out;
2296         }
2297         if (ret > 0) {
2298                 if (metadata) {
2299                         if (path->slots[0] > 0) {
2300                                 path->slots[0]--;
2301                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2302                                                       path->slots[0]);
2303                                 if (key.objectid == node->bytenr &&
2304                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2305                                     key.offset == node->num_bytes)
2306                                         ret = 0;
2307                         }
2308                         if (ret > 0) {
2309                                 btrfs_release_path(path);
2310                                 metadata = 0;
2311
2312                                 key.objectid = node->bytenr;
2313                                 key.offset = node->num_bytes;
2314                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2315                                 goto again;
2316                         }
2317                 } else {
2318                         err = -EIO;
2319                         goto out;
2320                 }
2321         }
2322
2323         leaf = path->nodes[0];
2324         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2325 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2326         if (item_size < sizeof(*ei)) {
2327                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2328                                              path, (u64)-1, 0);
2329                 if (ret < 0) {
2330                         err = ret;
2331                         goto out;
2332                 }
2333                 leaf = path->nodes[0];
2334                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2335         }
2336 #endif
2337         BUG_ON(item_size < sizeof(*ei));
2338         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2339         __run_delayed_extent_op(extent_op, leaf, ei);
2340
2341         btrfs_mark_buffer_dirty(leaf);
2342 out:
2343         btrfs_free_path(path);
2344         return err;
2345 }
2346
2347 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2348                                 struct btrfs_root *root,
2349                                 struct btrfs_delayed_ref_node *node,
2350                                 struct btrfs_delayed_extent_op *extent_op,
2351                                 int insert_reserved)
2352 {
2353         int ret = 0;
2354         struct btrfs_delayed_tree_ref *ref;
2355         struct btrfs_key ins;
2356         u64 parent = 0;
2357         u64 ref_root = 0;
2358         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2359                                                  SKINNY_METADATA);
2360
2361         ref = btrfs_delayed_node_to_tree_ref(node);
2362         trace_run_delayed_tree_ref(node, ref, node->action);
2363
2364         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2365                 parent = ref->parent;
2366         ref_root = ref->root;
2367
2368         ins.objectid = node->bytenr;
2369         if (skinny_metadata) {
2370                 ins.offset = ref->level;
2371                 ins.type = BTRFS_METADATA_ITEM_KEY;
2372         } else {
2373                 ins.offset = node->num_bytes;
2374                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2375         }
2376
2377         BUG_ON(node->ref_mod != 1);
2378         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2379                 BUG_ON(!extent_op || !extent_op->update_flags);
2380                 ret = alloc_reserved_tree_block(trans, root,
2381                                                 parent, ref_root,
2382                                                 extent_op->flags_to_set,
2383                                                 &extent_op->key,
2384                                                 ref->level, &ins);
2385         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2386                 ret = __btrfs_inc_extent_ref(trans, root, node,
2387                                              parent, ref_root,
2388                                              ref->level, 0, 1,
2389                                              extent_op);
2390         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2391                 ret = __btrfs_free_extent(trans, root, node,
2392                                           parent, ref_root,
2393                                           ref->level, 0, 1, extent_op);
2394         } else {
2395                 BUG();
2396         }
2397         return ret;
2398 }
2399
2400 /* helper function to actually process a single delayed ref entry */
2401 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2402                                struct btrfs_root *root,
2403                                struct btrfs_delayed_ref_node *node,
2404                                struct btrfs_delayed_extent_op *extent_op,
2405                                int insert_reserved)
2406 {
2407         int ret = 0;
2408
2409         if (trans->aborted) {
2410                 if (insert_reserved)
2411                         btrfs_pin_extent(root, node->bytenr,
2412                                          node->num_bytes, 1);
2413                 return 0;
2414         }
2415
2416         if (btrfs_delayed_ref_is_head(node)) {
2417                 struct btrfs_delayed_ref_head *head;
2418                 /*
2419                  * we've hit the end of the chain and we were supposed
2420                  * to insert this extent into the tree.  But it got
2421                  * deleted before we ever needed to insert it, so all
2422                  * we have to do is clean up the accounting.
2423                  */
2424                 BUG_ON(extent_op);
2425                 head = btrfs_delayed_node_to_head(node);
2426                 trace_run_delayed_ref_head(node, head, node->action);
2427
2428                 if (insert_reserved) {
2429                         btrfs_pin_extent(root, node->bytenr,
2430                                          node->num_bytes, 1);
2431                         if (head->is_data) {
2432                                 ret = btrfs_del_csums(trans, root,
2433                                                       node->bytenr,
2434                                                       node->num_bytes);
2435                         }
2436                 }
2437
2438                 /* Also free its reserved qgroup space */
2439                 btrfs_qgroup_free_delayed_ref(root->fs_info,
2440                                               head->qgroup_ref_root,
2441                                               head->qgroup_reserved);
2442                 return ret;
2443         }
2444
2445         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2446             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2447                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2448                                            insert_reserved);
2449         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2450                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2451                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2452                                            insert_reserved);
2453         else
2454                 BUG();
2455         return ret;
2456 }
2457
2458 static inline struct btrfs_delayed_ref_node *
2459 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2460 {
2461         struct btrfs_delayed_ref_node *ref;
2462
2463         if (list_empty(&head->ref_list))
2464                 return NULL;
2465
2466         /*
2467          * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.  This
2468          * prevents a ref count from going down to zero, which would delete
2469          * the extent item from the extent tree while there are still
2470          * references to add; those adds would fail to find the extent item.
2471          */
2472         list_for_each_entry(ref, &head->ref_list, list) {
2473                 if (ref->action == BTRFS_ADD_DELAYED_REF)
2474                         return ref;
2475         }
2476
2477         return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
2478                           list);
2479 }
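
/*
 * A self-contained sketch of the pick order above: scan for an ADD first
 * so a DROP can never take the last reference away while ADDs are still
 * queued behind it.
 */
#if 0
enum toy_action { TOY_ADD, TOY_DROP };

/* Return the index of the ref to run next, or -1 if the list is empty. */
static int toy_select(const enum toy_action *refs, int nr)
{
        int i;

        if (!nr)
                return -1;
        for (i = 0; i < nr; i++)
                if (refs[i] == TOY_ADD)
                        return i;       /* ADDs always go first */
        return 0;                       /* otherwise the oldest entry */
}
#endif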
2480
2481 /*
2482  * Returns 0 on success or if called with an already aborted transaction.
2483  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2484  */
2485 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2486                                              struct btrfs_root *root,
2487                                              unsigned long nr)
2488 {
2489         struct btrfs_delayed_ref_root *delayed_refs;
2490         struct btrfs_delayed_ref_node *ref;
2491         struct btrfs_delayed_ref_head *locked_ref = NULL;
2492         struct btrfs_delayed_extent_op *extent_op;
2493         struct btrfs_fs_info *fs_info = root->fs_info;
2494         ktime_t start = ktime_get();
2495         int ret;
2496         unsigned long count = 0;
2497         unsigned long actual_count = 0;
2498         int must_insert_reserved = 0;
2499
2500         delayed_refs = &trans->transaction->delayed_refs;
2501         while (1) {
2502                 if (!locked_ref) {
2503                         if (count >= nr)
2504                                 break;
2505
2506                         spin_lock(&delayed_refs->lock);
2507                         locked_ref = btrfs_select_ref_head(trans);
2508                         if (!locked_ref) {
2509                                 spin_unlock(&delayed_refs->lock);
2510                                 break;
2511                         }
2512
2513                         /* grab the lock that says we are going to process
2514                          * all the refs for this head */
2515                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2516                         spin_unlock(&delayed_refs->lock);
2517                         /*
2518                          * we may have dropped the spin lock to get the head
2519                          * mutex lock, and that might have given someone else
2520                          * time to free the head.  If that's true, it has been
2521                          * removed from our list and we can move on.
2522                          */
2523                         if (ret == -EAGAIN) {
2524                                 locked_ref = NULL;
2525                                 count++;
2526                                 continue;
2527                         }
2528                 }
2529
2530                 /*
2531                  * We need to try to merge add/drops of the same ref since we
2532                  * can run into issues with relocate dropping the implicit ref
2533                  * and then it being added back again before the drop can
2534                  * finish.  If we merged anything we need to re-loop so we can
2535                  * get a good ref.
2536                  * Or we can get node references of the same type that weren't
2537                  * merged when created due to bumps in the tree mod seq, and
2538                  * we need to merge them to prevent adding an inline extent
2539                  * backref before dropping it (triggering a BUG_ON at
2540                  * insert_inline_extent_backref()).
2541                  */
2542                 spin_lock(&locked_ref->lock);
2543                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2544                                          locked_ref);
2545
2546                 /*
2547                  * locked_ref is the head node, so we have to go one
2548                  * node back for any delayed ref updates
2549                  */
2550                 ref = select_delayed_ref(locked_ref);
2551
2552                 if (ref && ref->seq &&
2553                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2554                         spin_unlock(&locked_ref->lock);
2555                         btrfs_delayed_ref_unlock(locked_ref);
2556                         spin_lock(&delayed_refs->lock);
2557                         locked_ref->processing = 0;
2558                         delayed_refs->num_heads_ready++;
2559                         spin_unlock(&delayed_refs->lock);
2560                         locked_ref = NULL;
2561                         cond_resched();
2562                         count++;
2563                         continue;
2564                 }
2565
2566                 /*
2567                  * record the must insert reserved flag before we
2568                  * drop the spin lock.
2569                  */
2570                 must_insert_reserved = locked_ref->must_insert_reserved;
2571                 locked_ref->must_insert_reserved = 0;
2572
2573                 extent_op = locked_ref->extent_op;
2574                 locked_ref->extent_op = NULL;
2575
2576                 if (!ref) {
2577
2579                         /* All delayed refs have been processed; go ahead
2580                          * and send the head node to run_one_delayed_ref,
2581                          * so that any accounting fixes can happen
2582                          */
2583                         ref = &locked_ref->node;
2584
2585                         if (extent_op && must_insert_reserved) {
2586                                 btrfs_free_delayed_extent_op(extent_op);
2587                                 extent_op = NULL;
2588                         }
2589
2590                         if (extent_op) {
2591                                 spin_unlock(&locked_ref->lock);
2592                                 ret = run_delayed_extent_op(trans, root,
2593                                                             ref, extent_op);
2594                                 btrfs_free_delayed_extent_op(extent_op);
2595
2596                                 if (ret) {
2597                                         /*
2598                                          * Need to reset must_insert_reserved if
2599                                          * there was an error so the abort stuff
2600                                          * can clean up the reserved space
2601                                          * properly.
2602                                          */
2603                                         if (must_insert_reserved)
2604                                                 locked_ref->must_insert_reserved = 1;
2605                                         locked_ref->processing = 0;
2606                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2607                                         btrfs_delayed_ref_unlock(locked_ref);
2608                                         return ret;
2609                                 }
2610                                 continue;
2611                         }
2612
2613                         /*
2614                          * Need to drop our head ref lock and re-acquire the
2615                          * delayed ref lock and then re-check to make sure
2616                          * no new refs got added.
2617                          */
2618                         spin_unlock(&locked_ref->lock);
2619                         spin_lock(&delayed_refs->lock);
2620                         spin_lock(&locked_ref->lock);
2621                         if (!list_empty(&locked_ref->ref_list) ||
2622                             locked_ref->extent_op) {
2623                                 spin_unlock(&locked_ref->lock);
2624                                 spin_unlock(&delayed_refs->lock);
2625                                 continue;
2626                         }
2627                         ref->in_tree = 0;
2628                         delayed_refs->num_heads--;
2629                         rb_erase(&locked_ref->href_node,
2630                                  &delayed_refs->href_root);
2631                         spin_unlock(&delayed_refs->lock);
2632                 } else {
2633                         actual_count++;
2634                         ref->in_tree = 0;
2635                         list_del(&ref->list);
2636                 }
2637                 atomic_dec(&delayed_refs->num_entries);
2638
2639                 if (!btrfs_delayed_ref_is_head(ref)) {
2640                         /*
2641                          * when we run the delayed ref, also correct the
2642                          * ref_mod on the head
2643                          */
2644                         switch (ref->action) {
2645                         case BTRFS_ADD_DELAYED_REF:
2646                         case BTRFS_ADD_DELAYED_EXTENT:
2647                                 locked_ref->node.ref_mod -= ref->ref_mod;
2648                                 break;
2649                         case BTRFS_DROP_DELAYED_REF:
2650                                 locked_ref->node.ref_mod += ref->ref_mod;
2651                                 break;
2652                         default:
2653                                 WARN_ON(1);
2654                         }
2655                 }
2656                 spin_unlock(&locked_ref->lock);
2657
2658                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2659                                           must_insert_reserved);
2660
2661                 btrfs_free_delayed_extent_op(extent_op);
2662                 if (ret) {
2663                         locked_ref->processing = 0;
2664                         btrfs_delayed_ref_unlock(locked_ref);
2665                         btrfs_put_delayed_ref(ref);
2666                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2667                         return ret;
2668                 }
2669
2670                 /*
2671                  * If this node is a head, that means all the refs in this head
2672                  * have been dealt with, and we will pick the next head to deal
2673                  * with, so we must unlock the head and drop it from the cluster
2674                  * list before we release it.
2675                  */
2676                 if (btrfs_delayed_ref_is_head(ref)) {
2677                         if (locked_ref->is_data &&
2678                             locked_ref->total_ref_mod < 0) {
2679                                 spin_lock(&delayed_refs->lock);
2680                                 delayed_refs->pending_csums -= ref->num_bytes;
2681                                 spin_unlock(&delayed_refs->lock);
2682                         }
2683                         btrfs_delayed_ref_unlock(locked_ref);
2684                         locked_ref = NULL;
2685                 }
2686                 btrfs_put_delayed_ref(ref);
2687                 count++;
2688                 cond_resched();
2689         }
2690
2691         /*
2692          * We don't want to include ref heads in the count: empty ref heads
2693          * only do accounting, no actual extent tree updates, and would
2694          * drastically skew the average runtime down.
2695          */
2696         if (actual_count > 0) {
2697                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2698                 u64 avg;
2699
2700                 /*
2701                  * We weigh the current average higher than our current runtime
2702                  * to avoid large swings in the average.
2703                  */
2704                 spin_lock(&delayed_refs->lock);
2705                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2706                 fs_info->avg_delayed_ref_runtime = avg >> 2;    /* div by 4 */
2707                 spin_unlock(&delayed_refs->lock);
2708         }
2709         return 0;
2710 }
2711
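/*
 * Illustrative sketch (not part of the kernel build): the running
 * average maintained above is a fixed-point exponentially weighted
 * moving average with weight 3/4 on the old value.  The userspace
 * model below is hypothetical and only demonstrates the arithmetic.
 */
#if 0 /* standalone model; compile separately */
#include <stdint.h>
#include <stdio.h>

/* Same update as above: avg = (3 * old + new) / 4. */
static uint64_t ewma_update(uint64_t avg, uint64_t runtime)
{
	return (avg * 3 + runtime) >> 2;	/* div by 4 */
}

int main(void)
{
	uint64_t avg = 0;
	uint64_t samples[] = { 1000, 1000, 5000, 1000 };
	int i;

	/* a single 5000ns spike only pulls the average up gradually */
	for (i = 0; i < 4; i++) {
		avg = ewma_update(avg, samples[i]);
		printf("sample %llu -> avg %llu\n",
		       (unsigned long long)samples[i],
		       (unsigned long long)avg);
	}
	return 0;
}
#endif
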
2712 #ifdef SCRAMBLE_DELAYED_REFS
2713 /*
2714  * Normally delayed refs get processed in ascending bytenr order. This
2715  * correlates in most cases to the order added. To expose dependencies on this
2716  * order, we start to process the tree in the middle instead of the beginning.
2717  */
2718 static u64 find_middle(struct rb_root *root)
2719 {
2720         struct rb_node *n = root->rb_node;
2721         struct btrfs_delayed_ref_node *entry;
2722         int alt = 1;
2723         u64 middle = 0;
2724         u64 first = 0, last = 0;
2725
2726         n = rb_first(root);
2727         if (n) {
2728                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2729                 first = entry->bytenr;
2730         }
2731         n = rb_last(root);
2732         if (n) {
2733                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2734                 last = entry->bytenr;
2735         }
2736         n = root->rb_node;
2737
2738         while (n) {
2739                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2740                 WARN_ON(!entry->in_tree);
2741
2742                 middle = entry->bytenr;
2743
2744                 if (alt)
2745                         n = n->rb_left;
2746                 else
2747                         n = n->rb_right;
2748
2749                 alt = 1 - alt;
2750         }
2751         return middle;
2752 }
2753 #endif
2754
2755 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2756 {
2757         u64 num_bytes;
2758
2759         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2760                              sizeof(struct btrfs_extent_inline_ref));
2761         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2762                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2763
2764         /*
2765          * We don't ever fill up leaves all the way, so the caller doubles
2766          * this estimate to get closer to what we're really going to use.
2767          */
2768         return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2769 }
2770
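/*
 * Illustrative sketch (not part of the kernel build): a userspace model
 * of the leaf estimate above.  The structure sizes and leaf size below
 * are assumed stand-ins, not the real on-disk values.
 */
#if 0 /* standalone model; compile separately */
#include <stdint.h>
#include <stdio.h>

#define EXTENT_ITEM_SIZE	24	/* assumed extent item size */
#define INLINE_REF_SIZE		9	/* assumed inline ref size */
#define LEAF_DATA_SIZE_MODEL	16283	/* assumed 16K leaf minus header */

static uint64_t heads_to_leaves_model(uint64_t heads)
{
	/* worst case every head dirties its own extent item + one ref */
	uint64_t num_bytes = heads * (EXTENT_ITEM_SIZE + INLINE_REF_SIZE);

	return num_bytes / LEAF_DATA_SIZE_MODEL;
}

int main(void)
{
	printf("1000 heads -> ~%llu leaves\n",
	       (unsigned long long)heads_to_leaves_model(1000));
	return 0;
}
#endif
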
2771 /*
2772  * Takes the number of bytes to be checksummed and figures out how many leaves it
2773  * would require to store the csums for that many bytes.
2774  */
2775 u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
2776 {
2777         u64 csum_size;
2778         u64 num_csums_per_leaf;
2779         u64 num_csums;
2780
2781         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
2782         num_csums_per_leaf = div64_u64(csum_size,
2783                         (u64)btrfs_super_csum_size(root->fs_info->super_copy));
2784         num_csums = div64_u64(csum_bytes, root->sectorsize);
2785         num_csums += num_csums_per_leaf - 1;
2786         num_csums = div64_u64(num_csums, num_csums_per_leaf);
2787         return num_csums;
2788 }
2789
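/*
 * Illustrative sketch (not part of the kernel build): the function
 * above packs one checksum per sector into leaves and rounds up with
 * the classic (n + per - 1) / per ceiling division.  The sizes in the
 * userspace model below are hypothetical stand-ins.
 */
#if 0 /* standalone model; compile separately */
#include <stdint.h>
#include <stdio.h>

#define SECTORSIZE_MODEL	4096ULL
#define CSUM_SIZE_MODEL		4ULL	/* assumed crc32c checksum size */
#define LEAF_CSUM_SPACE_MODEL	16000ULL /* assumed usable bytes per leaf */

static uint64_t csum_bytes_to_leaves_model(uint64_t csum_bytes)
{
	uint64_t per_leaf = LEAF_CSUM_SPACE_MODEL / CSUM_SIZE_MODEL;
	uint64_t num_csums = csum_bytes / SECTORSIZE_MODEL;

	return (num_csums + per_leaf - 1) / per_leaf;	/* ceiling division */
}

int main(void)
{
	/* 1GiB of data -> 262144 csums -> 66 leaves at 4000 csums/leaf */
	printf("%llu leaves\n",
	       (unsigned long long)csum_bytes_to_leaves_model(1ULL << 30));
	return 0;
}
#endif
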
2790 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2791                                        struct btrfs_root *root)
2792 {
2793         struct btrfs_block_rsv *global_rsv;
2794         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2795         u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
2796         u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
2797         u64 num_bytes, num_dirty_bgs_bytes;
2798         int ret = 0;
2799
2800         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2801         num_heads = heads_to_leaves(root, num_heads);
2802         if (num_heads > 1)
2803                 num_bytes += (num_heads - 1) * root->nodesize;
2804         num_bytes <<= 1;
2805         num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
2806         num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
2807                                                              num_dirty_bgs);
2808         global_rsv = &root->fs_info->global_block_rsv;
2809
2810         /*
2811          * If we can't allocate any more chunks, let's make sure we have _lots_ of
2812          * wiggle room since running delayed refs can create more delayed refs.
2813          */
2814         if (global_rsv->space_info->full) {
2815                 num_dirty_bgs_bytes <<= 1;
2816                 num_bytes <<= 1;
2817         }
2818
2819         spin_lock(&global_rsv->lock);
2820         if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
2821                 ret = 1;
2822         spin_unlock(&global_rsv->lock);
2823         return ret;
2824 }
2825
2826 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2827                                        struct btrfs_root *root)
2828 {
2829         struct btrfs_fs_info *fs_info = root->fs_info;
2830         u64 num_entries =
2831                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2832         u64 avg_runtime;
2833         u64 val;
2834
2835         smp_mb();
2836         avg_runtime = fs_info->avg_delayed_ref_runtime;
2837         val = num_entries * avg_runtime;
2838         if (val >= NSEC_PER_SEC)
2839                 return 1;
2840         if (val >= NSEC_PER_SEC / 2)
2841                 return 2;
2842
2843         return btrfs_check_space_for_delayed_refs(trans, root);
2844 }
2845
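/*
 * Illustrative sketch (not part of the kernel build): the throttling
 * decision above estimates how long running all queued delayed refs
 * would take and throttles once that estimate reaches half a second or
 * a full second.  Hypothetical userspace model:
 */
#if 0 /* standalone model; compile separately */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC_MODEL	1000000000ULL

static int should_throttle_model(uint64_t num_entries, uint64_t avg_runtime_ns)
{
	uint64_t est = num_entries * avg_runtime_ns;

	if (est >= NSEC_PER_SEC_MODEL)
		return 1;
	if (est >= NSEC_PER_SEC_MODEL / 2)
		return 2;
	return 0;	/* the kernel falls back to a space check here */
}

int main(void)
{
	/* e.g. 50000 entries at ~15us each is ~0.75s of estimated work */
	printf("decision: %d\n", should_throttle_model(50000, 15000));
	return 0;
}
#endif
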
2846 struct async_delayed_refs {
2847         struct btrfs_root *root;
2848         u64 transid;
2849         int count;
2850         int error;
2851         int sync;
2852         struct completion wait;
2853         struct btrfs_work work;
2854 };
2855
2856 static void delayed_ref_async_start(struct btrfs_work *work)
2857 {
2858         struct async_delayed_refs *async;
2859         struct btrfs_trans_handle *trans;
2860         int ret;
2861
2862         async = container_of(work, struct async_delayed_refs, work);
2863
2864         /* if the commit is already started, we don't need to wait here */
2865         if (btrfs_transaction_blocked(async->root->fs_info))
2866                 goto done;
2867
2868         trans = btrfs_join_transaction(async->root);
2869         if (IS_ERR(trans)) {
2870                 async->error = PTR_ERR(trans);
2871                 goto done;
2872         }
2873
2874         /*
2875          * trans->sync means that when we call end_transaction, we won't
2876          * wait on delayed refs
2877          */
2878         trans->sync = true;
2879
2880         /* Don't bother flushing if we got into a different transaction */
2881         if (trans->transid > async->transid)
2882                 goto end;
2883
2884         ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2885         if (ret)
2886                 async->error = ret;
2887 end:
2888         ret = btrfs_end_transaction(trans, async->root);
2889         if (ret && !async->error)
2890                 async->error = ret;
2891 done:
2892         if (async->sync)
2893                 complete(&async->wait);
2894         else
2895                 kfree(async);
2896 }
2897
2898 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2899                                  unsigned long count, u64 transid, int wait)
2900 {
2901         struct async_delayed_refs *async;
2902         int ret;
2903
2904         async = kmalloc(sizeof(*async), GFP_NOFS);
2905         if (!async)
2906                 return -ENOMEM;
2907
2908         async->root = root->fs_info->tree_root;
2909         async->count = count;
2910         async->error = 0;
2911         async->transid = transid;
2912         if (wait)
2913                 async->sync = 1;
2914         else
2915                 async->sync = 0;
2916         init_completion(&async->wait);
2917
2918         btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2919                         delayed_ref_async_start, NULL, NULL);
2920
2921         btrfs_queue_work(root->fs_info->extent_workers, &async->work);
2922
2923         if (wait) {
2924                 wait_for_completion(&async->wait);
2925                 ret = async->error;
2926                 kfree(async);
2927                 return ret;
2928         }
2929         return 0;
2930 }
2931
2932 /*
2933  * this starts processing the delayed reference count updates and
2934  * extent insertions we have queued up so far.  count can be
2935  * 0, which means to process everything in the tree at the start
2936  * of the run (but not newly added entries), or it can be some target
2937  * number you'd like to process.
2938  *
2939  * Returns 0 on success or if called with an aborted transaction.
2940  * Returns <0 on error and aborts the transaction.
2941  */
2942 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2943                            struct btrfs_root *root, unsigned long count)
2944 {
2945         struct rb_node *node;
2946         struct btrfs_delayed_ref_root *delayed_refs;
2947         struct btrfs_delayed_ref_head *head;
2948         int ret;
2949         int run_all = count == (unsigned long)-1;
2950         bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
2951
2952         /* We'll clean this up in btrfs_cleanup_transaction */
2953         if (trans->aborted)
2954                 return 0;
2955
2956         if (root->fs_info->creating_free_space_tree)
2957                 return 0;
2958
2959         if (root == root->fs_info->extent_root)
2960                 root = root->fs_info->tree_root;
2961
2962         delayed_refs = &trans->transaction->delayed_refs;
2963         if (count == 0)
2964                 count = atomic_read(&delayed_refs->num_entries) * 2;
2965
2966 again:
2967 #ifdef SCRAMBLE_DELAYED_REFS
2968         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2969 #endif
2970         trans->can_flush_pending_bgs = false;
2971         ret = __btrfs_run_delayed_refs(trans, root, count);
2972         if (ret < 0) {
2973                 btrfs_abort_transaction(trans, root, ret);
2974                 return ret;
2975         }
2976
2977         if (run_all) {
2978                 if (!list_empty(&trans->new_bgs))
2979                         btrfs_create_pending_block_groups(trans, root);
2980
2981                 spin_lock(&delayed_refs->lock);
2982                 node = rb_first(&delayed_refs->href_root);
2983                 if (!node) {
2984                         spin_unlock(&delayed_refs->lock);
2985                         goto out;
2986                 }
2987                 count = (unsigned long)-1;
2988
2989                 while (node) {
2990                         head = rb_entry(node, struct btrfs_delayed_ref_head,
2991                                         href_node);
2992                         if (btrfs_delayed_ref_is_head(&head->node)) {
2993                                 struct btrfs_delayed_ref_node *ref;
2994
2995                                 ref = &head->node;
2996                                 atomic_inc(&ref->refs);
2997
2998                                 spin_unlock(&delayed_refs->lock);
2999                                 /*
3000                                  * Mutex was contended, block until it's
3001                                  * released and try again
3002                                  */
3003                                 mutex_lock(&head->mutex);
3004                                 mutex_unlock(&head->mutex);
3005
3006                                 btrfs_put_delayed_ref(ref);
3007                                 cond_resched();
3008                                 goto again;
3009                         } else {
3010                                 WARN_ON(1);
3011                         }
3012                         node = rb_next(node);
3013                 }
3014                 spin_unlock(&delayed_refs->lock);
3015                 cond_resched();
3016                 goto again;
3017         }
3018 out:
3019         assert_qgroups_uptodate(trans);
3020         trans->can_flush_pending_bgs = can_flush_pending_bgs;
3021         return 0;
3022 }
3023
3024 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
3025                                 struct btrfs_root *root,
3026                                 u64 bytenr, u64 num_bytes, u64 flags,
3027                                 int level, int is_data)
3028 {
3029         struct btrfs_delayed_extent_op *extent_op;
3030         int ret;
3031
3032         extent_op = btrfs_alloc_delayed_extent_op();
3033         if (!extent_op)
3034                 return -ENOMEM;
3035
3036         extent_op->flags_to_set = flags;
3037         extent_op->update_flags = true;
3038         extent_op->update_key = false;
3039         extent_op->is_data = !!is_data;
3040         extent_op->level = level;
3041
3042         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
3043                                           num_bytes, extent_op);
3044         if (ret)
3045                 btrfs_free_delayed_extent_op(extent_op);
3046         return ret;
3047 }
3048
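/*
 * Illustrative sketch (not part of the kernel build): the function
 * above follows a common ownership pattern - the extent op is only
 * freed by the caller when queueing fails; on success the delayed-ref
 * machinery owns it and frees it later.  Hypothetical userspace model:
 */
#if 0 /* standalone model; compile separately */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct op_model {
	int flags;
};

/* Pretend the queue rejected the op. */
static int queue_op(struct op_model *op)
{
	return -ENOSPC;
}

static int submit_op(int flags)
{
	struct op_model *op = malloc(sizeof(*op));
	int ret;

	if (!op)
		return -ENOMEM;
	op->flags = flags;
	ret = queue_op(op);
	if (ret)
		free(op);	/* the queue never took ownership */
	return ret;		/* on success the queue frees it later */
}

int main(void)
{
	printf("submit_op: %d\n", submit_op(1));
	return 0;
}
#endif
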
3049 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
3050                                       struct btrfs_root *root,
3051                                       struct btrfs_path *path,
3052                                       u64 objectid, u64 offset, u64 bytenr)
3053 {
3054         struct btrfs_delayed_ref_head *head;
3055         struct btrfs_delayed_ref_node *ref;
3056         struct btrfs_delayed_data_ref *data_ref;
3057         struct btrfs_delayed_ref_root *delayed_refs;
3058         int ret = 0;
3059
3060         delayed_refs = &trans->transaction->delayed_refs;
3061         spin_lock(&delayed_refs->lock);
3062         head = btrfs_find_delayed_ref_head(trans, bytenr);
3063         if (!head) {
3064                 spin_unlock(&delayed_refs->lock);
3065                 return 0;
3066         }
3067
3068         if (!mutex_trylock(&head->mutex)) {
3069                 atomic_inc(&head->node.refs);
3070                 spin_unlock(&delayed_refs->lock);
3071
3072                 btrfs_release_path(path);
3073
3074                 /*
3075                  * Mutex was contended, block until it's released and let
3076                  * caller try again
3077                  */
3078                 mutex_lock(&head->mutex);
3079                 mutex_unlock(&head->mutex);
3080                 btrfs_put_delayed_ref(&head->node);
3081                 return -EAGAIN;
3082         }
3083         spin_unlock(&delayed_refs->lock);
3084
3085         spin_lock(&head->lock);
3086         list_for_each_entry(ref, &head->ref_list, list) {
3087                 /* If it's a shared ref we know a cross reference exists */
3088                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
3089                         ret = 1;
3090                         break;
3091                 }
3092
3093                 data_ref = btrfs_delayed_node_to_data_ref(ref);
3094
3095                 /*
3096                  * If our ref doesn't match the one we're currently looking at
3097                  * then we have a cross reference.
3098                  */
3099                 if (data_ref->root != root->root_key.objectid ||
3100                     data_ref->objectid != objectid ||
3101                     data_ref->offset != offset) {
3102                         ret = 1;
3103                         break;
3104                 }
3105         }
3106         spin_unlock(&head->lock);
3107         mutex_unlock(&head->mutex);
3108         return ret;
3109 }
3110
3111 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
3112                                         struct btrfs_root *root,
3113                                         struct btrfs_path *path,
3114                                         u64 objectid, u64 offset, u64 bytenr)
3115 {
3116         struct btrfs_root *extent_root = root->fs_info->extent_root;
3117         struct extent_buffer *leaf;
3118         struct btrfs_extent_data_ref *ref;
3119         struct btrfs_extent_inline_ref *iref;
3120         struct btrfs_extent_item *ei;
3121         struct btrfs_key key;
3122         u32 item_size;
3123         int ret;
3124
3125         key.objectid = bytenr;
3126         key.offset = (u64)-1;
3127         key.type = BTRFS_EXTENT_ITEM_KEY;
3128
3129         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3130         if (ret < 0)
3131                 goto out;
3132         BUG_ON(ret == 0); /* Corruption */
3133
3134         ret = -ENOENT;
3135         if (path->slots[0] == 0)
3136                 goto out;
3137
3138         path->slots[0]--;
3139         leaf = path->nodes[0];
3140         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3141
3142         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
3143                 goto out;
3144
3145         ret = 1;
3146         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3147 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3148         if (item_size < sizeof(*ei)) {
3149                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
3150                 goto out;
3151         }
3152 #endif
3153         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
3154
3155         if (item_size != sizeof(*ei) +
3156             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
3157                 goto out;
3158
3159         if (btrfs_extent_generation(leaf, ei) <=
3160             btrfs_root_last_snapshot(&root->root_item))
3161                 goto out;
3162
3163         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
3164         if (btrfs_extent_inline_ref_type(leaf, iref) !=
3165             BTRFS_EXTENT_DATA_REF_KEY)
3166                 goto out;
3167
3168         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
3169         if (btrfs_extent_refs(leaf, ei) !=
3170             btrfs_extent_data_ref_count(leaf, ref) ||
3171             btrfs_extent_data_ref_root(leaf, ref) !=
3172             root->root_key.objectid ||
3173             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3174             btrfs_extent_data_ref_offset(leaf, ref) != offset)
3175                 goto out;
3176
3177         ret = 0;
3178 out:
3179         return ret;
3180 }
3181
3182 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3183                           struct btrfs_root *root,
3184                           u64 objectid, u64 offset, u64 bytenr)
3185 {
3186         struct btrfs_path *path;
3187         int ret;
3188         int ret2;
3189
3190         path = btrfs_alloc_path();
3191         if (!path)
3192                 return -ENOMEM;
3193
3194         do {
3195                 ret = check_committed_ref(trans, root, path, objectid,
3196                                           offset, bytenr);
3197                 if (ret && ret != -ENOENT)
3198                         goto out;
3199
3200                 ret2 = check_delayed_ref(trans, root, path, objectid,
3201                                          offset, bytenr);
3202         } while (ret2 == -EAGAIN);
3203
3204         if (ret2 && ret2 != -ENOENT) {
3205                 ret = ret2;
3206                 goto out;
3207         }
3208
3209         if (ret != -ENOENT || ret2 != -ENOENT)
3210                 ret = 0;
3211 out:
3212         btrfs_free_path(path);
3213         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3214                 WARN_ON(ret > 0);
3215         return ret;
3216 }
3217
3218 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3219                            struct btrfs_root *root,
3220                            struct extent_buffer *buf,
3221                            int full_backref, int inc)
3222 {
3223         u64 bytenr;
3224         u64 num_bytes;
3225         u64 parent;
3226         u64 ref_root;
3227         u32 nritems;
3228         struct btrfs_key key;
3229         struct btrfs_file_extent_item *fi;
3230         int i;
3231         int level;
3232         int ret = 0;
3233         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3234                             u64, u64, u64, u64, u64, u64);
3235
3237         if (btrfs_test_is_dummy_root(root))
3238                 return 0;
3239
3240         ref_root = btrfs_header_owner(buf);
3241         nritems = btrfs_header_nritems(buf);
3242         level = btrfs_header_level(buf);
3243
3244         if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3245                 return 0;
3246
3247         if (inc)
3248                 process_func = btrfs_inc_extent_ref;
3249         else
3250                 process_func = btrfs_free_extent;
3251
3252         if (full_backref)
3253                 parent = buf->start;
3254         else
3255                 parent = 0;
3256
3257         for (i = 0; i < nritems; i++) {
3258                 if (level == 0) {
3259                         btrfs_item_key_to_cpu(buf, &key, i);
3260                         if (key.type != BTRFS_EXTENT_DATA_KEY)
3261                                 continue;
3262                         fi = btrfs_item_ptr(buf, i,
3263                                             struct btrfs_file_extent_item);
3264                         if (btrfs_file_extent_type(buf, fi) ==
3265                             BTRFS_FILE_EXTENT_INLINE)
3266                                 continue;
3267                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3268                         if (bytenr == 0)
3269                                 continue;
3270
3271                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3272                         key.offset -= btrfs_file_extent_offset(buf, fi);
3273                         ret = process_func(trans, root, bytenr, num_bytes,
3274                                            parent, ref_root, key.objectid,
3275                                            key.offset);
3276                         if (ret)
3277                                 goto fail;
3278                 } else {
3279                         bytenr = btrfs_node_blockptr(buf, i);
3280                         num_bytes = root->nodesize;
3281                         ret = process_func(trans, root, bytenr, num_bytes,
3282                                            parent, ref_root, level - 1, 0);
3283                         if (ret)
3284                                 goto fail;
3285                 }
3286         }
3287         return 0;
3288 fail:
3289         return ret;
3290 }
3291
3292 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3293                   struct extent_buffer *buf, int full_backref)
3294 {
3295         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3296 }
3297
3298 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3299                   struct extent_buffer *buf, int full_backref)
3300 {
3301         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3302 }
3303
3304 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3305                                  struct btrfs_root *root,
3306                                  struct btrfs_path *path,
3307                                  struct btrfs_block_group_cache *cache)
3308 {
3309         int ret;
3310         struct btrfs_root *extent_root = root->fs_info->extent_root;
3311         unsigned long bi;
3312         struct extent_buffer *leaf;
3313
3314         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3315         if (ret) {
3316                 if (ret > 0)
3317                         ret = -ENOENT;
3318                 goto fail;
3319         }
3320
3321         leaf = path->nodes[0];
3322         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3323         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3324         btrfs_mark_buffer_dirty(leaf);
3325 fail:
3326         btrfs_release_path(path);
3327         return ret;
3329 }
3330
3331 static struct btrfs_block_group_cache *
3332 next_block_group(struct btrfs_root *root,
3333                  struct btrfs_block_group_cache *cache)
3334 {
3335         struct rb_node *node;
3336
3337         spin_lock(&root->fs_info->block_group_cache_lock);
3338
3339         /* If our block group was removed, we need a full search. */
3340         if (RB_EMPTY_NODE(&cache->cache_node)) {
3341                 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3342
3343                 spin_unlock(&root->fs_info->block_group_cache_lock);
3344                 btrfs_put_block_group(cache);
3345                 cache = btrfs_lookup_first_block_group(root->fs_info,
3346                                                        next_bytenr);
3347                 return cache;
3348         }
3349         node = rb_next(&cache->cache_node);
3350         btrfs_put_block_group(cache);
3351         if (node) {
3352                 cache = rb_entry(node, struct btrfs_block_group_cache,
3353                                  cache_node);
3354                 btrfs_get_block_group(cache);
3355         } else
3356                 cache = NULL;
3357         spin_unlock(&root->fs_info->block_group_cache_lock);
3358         return cache;
3359 }
3360
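/*
 * Illustrative sketch (not part of the kernel build): next_block_group()
 * uses the classic refcounted-iteration pattern - take a reference on
 * the successor before dropping the one we hold, so the current element
 * can never vanish underneath us.  Hypothetical userspace model (the
 * lock protecting ->next and malloc error handling are elided):
 */
#if 0 /* standalone model; compile separately */
#include <stdio.h>
#include <stdlib.h>

struct node_model {
	int refs;
	int id;
	struct node_model *next;
};

static struct node_model *node_new(int id, struct node_model *next)
{
	struct node_model *n = malloc(sizeof(*n));

	n->refs = 1;	/* reference held by the list itself */
	n->id = id;
	n->next = next;
	return n;
}

static void node_get(struct node_model *n)
{
	n->refs++;
}

static void node_put(struct node_model *n)
{
	if (--n->refs == 0)
		free(n);
}

/* Pin the successor before dropping our current reference. */
static struct node_model *next_node(struct node_model *cur)
{
	struct node_model *next = cur->next;

	if (next)
		node_get(next);
	node_put(cur);
	return next;
}

int main(void)
{
	struct node_model *n = node_new(1, node_new(2, NULL));

	node_get(n);	/* our iteration reference on the head */
	while (n) {
		printf("node %d\n", n->id);
		n = next_node(n);
	}
	return 0;	/* list references are leaked for brevity */
}
#endif
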
3361 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3362                             struct btrfs_trans_handle *trans,
3363                             struct btrfs_path *path)
3364 {
3365         struct btrfs_root *root = block_group->fs_info->tree_root;
3366         struct inode *inode = NULL;
3367         u64 alloc_hint = 0;
3368         int dcs = BTRFS_DC_ERROR;
3369         u64 num_pages = 0;
3370         int retries = 0;
3371         int ret = 0;
3372
3373         /*
3374          * If this block group is smaller than 100MiB, don't bother caching
3375          * the block group.
3376          */
3377         if (block_group->key.offset < (100 * SZ_1M)) {
3378                 spin_lock(&block_group->lock);
3379                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3380                 spin_unlock(&block_group->lock);
3381                 return 0;
3382         }
3383
3384         if (trans->aborted)
3385                 return 0;
3386 again:
3387         inode = lookup_free_space_inode(root, block_group, path);
3388         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3389                 ret = PTR_ERR(inode);
3390                 btrfs_release_path(path);
3391                 goto out;
3392         }
3393
3394         if (IS_ERR(inode)) {
3395                 BUG_ON(retries);
3396                 retries++;
3397
3398                 if (block_group->ro)
3399                         goto out_free;
3400
3401                 ret = create_free_space_inode(root, trans, block_group, path);
3402                 if (ret)
3403                         goto out_free;
3404                 goto again;
3405         }
3406
3407         /* We've already set up this transaction, go ahead and exit */
3408         if (block_group->cache_generation == trans->transid &&
3409             i_size_read(inode)) {
3410                 dcs = BTRFS_DC_SETUP;
3411                 goto out_put;
3412         }
3413
3414         /*
3415          * We want to set the generation to 0 so that if anything goes wrong
3416          * from here on out we know not to trust this cache when we load up next
3417          * time.
3418          */
3419         BTRFS_I(inode)->generation = 0;
3420         ret = btrfs_update_inode(trans, root, inode);
3421         if (ret) {
3422                 /*
3423                  * So theoretically we could recover from this by simply setting
3424                  * the super cache generation to 0 so we know to invalidate the
3425                  * cache, but then we'd have to keep track of the block groups
3426                  * that fail this way so we know we _have_ to reset this cache
3427                  * before the next commit or risk reading stale cache.  So to
3428                  * limit our exposure to horrible edge cases, let's just abort the
3429                  * transaction; this only happens in really bad situations
3430                  * anyway.
3431                  */
3432                 btrfs_abort_transaction(trans, root, ret);
3433                 goto out_put;
3434         }
3435         WARN_ON(ret);
3436
3437         if (i_size_read(inode) > 0) {
3438                 ret = btrfs_check_trunc_cache_free_space(root,
3439                                         &root->fs_info->global_block_rsv);
3440                 if (ret)
3441                         goto out_put;
3442
3443                 ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
3444                 if (ret)
3445                         goto out_put;
3446         }
3447
3448         spin_lock(&block_group->lock);
3449         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3450             !btrfs_test_opt(root, SPACE_CACHE)) {
3451                 /*
3452                  * don't bother trying to write stuff out _if_
3453                  * a) we're not cached,
3454                  * b) we were mounted with the nospace_cache option.
3455                  */
3456                 dcs = BTRFS_DC_WRITTEN;
3457                 spin_unlock(&block_group->lock);
3458                 goto out_put;
3459         }
3460         spin_unlock(&block_group->lock);
3461
3462         /*
3463          * We hit an ENOSPC when setting up the cache in this transaction, just
3464          * skip doing the setup, we've already cleared the cache so we're safe.
3465          */
3466         if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
3467                 ret = -ENOSPC;
3468                 goto out_put;
3469         }
3470
3471         /*
3472          * Try to preallocate enough space based on how big the block group is.
3473          * Keep in mind this has to include any pinned space which could end up
3474          * taking up quite a bit since it's not folded into the other space
3475          * cache.
3476          */
3477         num_pages = div_u64(block_group->key.offset, SZ_256M);
3478         if (!num_pages)
3479                 num_pages = 1;
3480
3481         num_pages *= 16;
3482         num_pages *= PAGE_SIZE;
3483
3484         ret = btrfs_check_data_free_space(inode, 0, num_pages);
3485         if (ret)
3486                 goto out_put;
3487
3488         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3489                                               num_pages, num_pages,
3490                                               &alloc_hint);
3491         /*
3492          * Our cache requires contiguous chunks so that we don't modify a bunch
3493          * of metadata or split extents when writing the cache out, which means
3494          * we can hit ENOSPC if we are heavily fragmented, in addition to normal
3495          * out of space conditions.  So if we hit this, just skip setting up any
3496          * other block groups for this transaction, maybe we'll unpin enough
3497          * space the next time around.
3498          */
3499         if (!ret)
3500                 dcs = BTRFS_DC_SETUP;
3501         else if (ret == -ENOSPC)
3502                 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
3503         btrfs_free_reserved_data_space(inode, 0, num_pages);
3504
3505 out_put:
3506         iput(inode);
3507 out_free:
3508         btrfs_release_path(path);
3509 out:
3510         spin_lock(&block_group->lock);
3511         if (!ret && dcs == BTRFS_DC_SETUP)
3512                 block_group->cache_generation = trans->transid;
3513         block_group->disk_cache_state = dcs;
3514         spin_unlock(&block_group->lock);
3515
3516         return ret;
3517 }
3518
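/*
 * Illustrative sketch (not part of the kernel build): the preallocation
 * sizing above reserves 16 pages of space cache per 256MiB of block
 * group, with a one-unit floor.  Note the kernel variable is called
 * num_pages even though it ends up holding a byte count.  Hypothetical
 * userspace model:
 */
#if 0 /* standalone model; compile separately */
#include <stdint.h>
#include <stdio.h>

#define SZ_256M_MODEL	(256ULL * 1024 * 1024)
#define PAGE_SIZE_MODEL	4096ULL	/* assumed page size */

static uint64_t cache_prealloc_bytes(uint64_t bg_size)
{
	uint64_t units = bg_size / SZ_256M_MODEL;

	if (!units)
		units = 1;
	return units * 16 * PAGE_SIZE_MODEL;
}

int main(void)
{
	/* a 1GiB block group reserves 4 * 16 * 4096 = 256KiB of cache */
	printf("%llu bytes\n",
	       (unsigned long long)cache_prealloc_bytes(1ULL << 30));
	return 0;
}
#endif
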
3519 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3520                             struct btrfs_root *root)
3521 {
3522         struct btrfs_block_group_cache *cache, *tmp;
3523         struct btrfs_transaction *cur_trans = trans->transaction;
3524         struct btrfs_path *path;
3525
3526         if (list_empty(&cur_trans->dirty_bgs) ||
3527             !btrfs_test_opt(root, SPACE_CACHE))
3528                 return 0;
3529
3530         path = btrfs_alloc_path();
3531         if (!path)
3532                 return -ENOMEM;
3533
3534         /* Could add new block groups, use _safe just in case */
3535         list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3536                                  dirty_list) {
3537                 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3538                         cache_save_setup(cache, trans, path);
3539         }
3540
3541         btrfs_free_path(path);
3542         return 0;
3543 }
3544
3545 /*
3546  * transaction commit does final block group cache writeback during a
3547  * critical section where nothing is allowed to change the FS.  This is
3548  * required in order for the cache to actually match the block group,
3549  * but can introduce a lot of latency into the commit.
3550  *
3551  * So, btrfs_start_dirty_block_groups is here to kick off block group
3552  * cache IO.  There's a chance we'll have to redo some of it if the
3553  * block group changes again during the commit, but it greatly reduces
3554  * the commit latency by getting rid of the easy block groups while
3555  * we're still allowing others to join the commit.
3556  */
3557 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
3558                                    struct btrfs_root *root)
3559 {
3560         struct btrfs_block_group_cache *cache;
3561         struct btrfs_transaction *cur_trans = trans->transaction;
3562         int ret = 0;
3563         int should_put;
3564         struct btrfs_path *path = NULL;
3565         LIST_HEAD(dirty);
3566         struct list_head *io = &cur_trans->io_bgs;
3567         int num_started = 0;
3568         int loops = 0;
3569
3570         spin_lock(&cur_trans->dirty_bgs_lock);
3571         if (list_empty(&cur_trans->dirty_bgs)) {
3572                 spin_unlock(&cur_trans->dirty_bgs_lock);
3573                 return 0;
3574         }
3575         list_splice_init(&cur_trans->dirty_bgs, &dirty);
3576         spin_unlock(&cur_trans->dirty_bgs_lock);
3577
3578 again:
3579         /*
3580          * make sure all the block groups on our dirty list actually
3581          * exist
3582          */
3583         btrfs_create_pending_block_groups(trans, root);
3584
3585         if (!path) {
3586                 path = btrfs_alloc_path();
3587                 if (!path)
3588                         return -ENOMEM;
3589         }
3590
3591         /*
3592          * cache_write_mutex is here only to save us from balance or automatic
3593          * removal of empty block groups deleting this block group while we are
3594          * writing out the cache
3595          */
3596         mutex_lock(&trans->transaction->cache_write_mutex);
3597         while (!list_empty(&dirty)) {
3598                 cache = list_first_entry(&dirty,
3599                                          struct btrfs_block_group_cache,
3600                                          dirty_list);
3601                 /*
3602                  * this can happen if something re-dirties a block
3603                  * group that is already under IO.  Just wait for it to
3604                  * finish and then do it all again
3605                  */
3606                 if (!list_empty(&cache->io_list)) {
3607                         list_del_init(&cache->io_list);
3608                         btrfs_wait_cache_io(root, trans, cache,
3609                                             &cache->io_ctl, path,
3610                                             cache->key.objectid);
3611                         btrfs_put_block_group(cache);
3612                 }
3613
3615                 /*
3616                  * btrfs_wait_cache_io uses the cache->dirty_list to decide
3617                  * if it should update the cache_state.  Don't delete
3618                  * until after we wait.
3619                  *
3620                  * Since we're not running in the commit critical section
3621                  * we need the dirty_bgs_lock to protect from update_block_group
3622                  */
3623                 spin_lock(&cur_trans->dirty_bgs_lock);
3624                 list_del_init(&cache->dirty_list);
3625                 spin_unlock(&cur_trans->dirty_bgs_lock);
3626
3627                 should_put = 1;
3628
3629                 cache_save_setup(cache, trans, path);
3630
3631                 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3632                         cache->io_ctl.inode = NULL;
3633                         ret = btrfs_write_out_cache(root, trans, cache, path);
3634                         if (ret == 0 && cache->io_ctl.inode) {
3635                                 num_started++;
3636                                 should_put = 0;
3637
3638                                 /*
3639                                  * the cache_write_mutex is protecting
3640                                  * the io_list
3641                                  */
3642                                 list_add_tail(&cache->io_list, io);
3643                         } else {
3644                                 /*
3645                                  * if we failed to write the cache, the
3646                                  * generation will be bad and life goes on
3647                                  */
3648                                 ret = 0;
3649                         }
3650                 }
3651                 if (!ret) {
3652                         ret = write_one_cache_group(trans, root, path, cache);
3653                         /*
3654                          * Our block group might still be attached to the list
3655                          * of new block groups in the transaction handle of some
3656                          * other task (struct btrfs_trans_handle->new_bgs). This
3657                          * means its block group item isn't yet in the extent
3658                          * tree. If this happens ignore the error, as we will
3659                          * try again later in the critical section of the
3660                          * transaction commit.
3661                          */
3662                         if (ret == -ENOENT) {
3663                                 ret = 0;
3664                                 spin_lock(&cur_trans->dirty_bgs_lock);
3665                                 if (list_empty(&cache->dirty_list)) {
3666                                         list_add_tail(&cache->dirty_list,
3667                                                       &cur_trans->dirty_bgs);
3668                                         btrfs_get_block_group(cache);
3669                                 }
3670                                 spin_unlock(&cur_trans->dirty_bgs_lock);
3671                         } else if (ret) {
3672                                 btrfs_abort_transaction(trans, root, ret);
3673                         }
3674                 }
3675
3676                 /* if it's not on the io list, we need to put the block group */
3677                 if (should_put)
3678                         btrfs_put_block_group(cache);
3679
3680                 if (ret)
3681                         break;
3682
3683                 /*
3684                  * Avoid blocking other tasks for too long. It might even save
3685                  * us from writing caches for block groups that are going to be
3686                  * removed.
3687                  */
3688                 mutex_unlock(&trans->transaction->cache_write_mutex);
3689                 mutex_lock(&trans->transaction->cache_write_mutex);
3690         }
3691         mutex_unlock(&trans->transaction->cache_write_mutex);
3692
3693         /*
3694          * go through delayed refs for all the stuff we've just kicked off
3695          * and then loop back (just once)
3696          */
3697         ret = btrfs_run_delayed_refs(trans, root, 0);
3698         if (!ret && loops == 0) {
3699                 loops++;
3700                 spin_lock(&cur_trans->dirty_bgs_lock);
3701                 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3702                 /*
3703                  * dirty_bgs_lock protects us from concurrent block group
3704                  * deletes too (not just cache_write_mutex).
3705                  */
3706                 if (!list_empty(&dirty)) {
3707                         spin_unlock(&cur_trans->dirty_bgs_lock);
3708                         goto again;
3709                 }
3710                 spin_unlock(&cur_trans->dirty_bgs_lock);
3711         }
3712
3713         btrfs_free_path(path);
3714         return ret;
3715 }
3716
3717 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3718                                    struct btrfs_root *root)
3719 {
3720         struct btrfs_block_group_cache *cache;
3721         struct btrfs_transaction *cur_trans = trans->transaction;
3722         int ret = 0;
3723         int should_put;
3724         struct btrfs_path *path;
3725         struct list_head *io = &cur_trans->io_bgs;
3726         int num_started = 0;
3727
3728         path = btrfs_alloc_path();
3729         if (!path)
3730                 return -ENOMEM;
3731
3732         /*
3733          * Even though we are in the critical section of the transaction commit,
3734          * we can still have concurrent tasks adding elements to this
3735          * transaction's list of dirty block groups. These tasks correspond to
3736          * endio free space workers started when writeback finishes for a
3737          * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
3738          * allocate new block groups as a result of COWing nodes of the root
3739          * tree when updating the free space inode. The writeback for the space
3740          * caches is triggered by an earlier call to
3741          * btrfs_start_dirty_block_groups() and iterations of the following
3742          * loop.
3743          * Also we want to do the cache_save_setup first and then run the
3744          * delayed refs to make sure we have the best chance at doing this all
3745          * in one shot.
3746          */
3747         spin_lock(&cur_trans->dirty_bgs_lock);
3748         while (!list_empty(&cur_trans->dirty_bgs)) {
3749                 cache = list_first_entry(&cur_trans->dirty_bgs,
3750                                          struct btrfs_block_group_cache,
3751                                          dirty_list);
3752
3753                 /*
3754                  * this can happen if cache_save_setup re-dirties a block
3755                  * group that is already under IO.  Just wait for it to
3756                  * finish and then do it all again
3757                  */
3758                 if (!list_empty(&cache->io_list)) {
3759                         spin_unlock(&cur_trans->dirty_bgs_lock);
3760                         list_del_init(&cache->io_list);
3761                         btrfs_wait_cache_io(root, trans, cache,
3762                                             &cache->io_ctl, path,
3763                                             cache->key.objectid);
3764                         btrfs_put_block_group(cache);
3765                         spin_lock(&cur_trans->dirty_bgs_lock);
3766                 }
3767
3768                 /*
3769                  * don't remove from the dirty list until after we've waited
3770                  * on any pending IO
3771                  */
3772                 list_del_init(&cache->dirty_list);
3773                 spin_unlock(&cur_trans->dirty_bgs_lock);
3774                 should_put = 1;
3775
3776                 cache_save_setup(cache, trans, path);
3777
3778                 if (!ret)
3779                         ret = btrfs_run_delayed_refs(trans, root, (unsigned long) -1);
3780
3781                 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3782                         cache->io_ctl.inode = NULL;
3783                         ret = btrfs_write_out_cache(root, trans, cache, path);
3784                         if (ret == 0 && cache->io_ctl.inode) {
3785                                 num_started++;
3786                                 should_put = 0;
3787                                 list_add_tail(&cache->io_list, io);
3788                         } else {
3789                                 /*
3790                                  * if we failed to write the cache, the
3791                                  * generation will be bad and life goes on
3792                                  */
3793                                 ret = 0;
3794                         }
3795                 }
3796                 if (!ret) {
3797                         ret = write_one_cache_group(trans, root, path, cache);
3798                         /*
3799                          * One of the free space endio workers might have
3800                          * created a new block group while updating a free space
3801                          * cache's inode (at inode.c:btrfs_finish_ordered_io())
3802                          * and hasn't released its transaction handle yet, in
3803                          * which case the new block group is still attached to
3804                          * its transaction handle and its creation has not
3805                          * finished yet (no block group item in the extent tree
3806                          * yet, etc). If this is the case, wait for all free
3807                          * space endio workers to finish and retry. This is a
3808                          * very rare case, so no need for a more efficient and
3809                          * complex approach.
3810                          */
3811                         if (ret == -ENOENT) {
3812                                 wait_event(cur_trans->writer_wait,
3813                                    atomic_read(&cur_trans->num_writers) == 1);
3814                                 ret = write_one_cache_group(trans, root, path,
3815                                                             cache);
3816                         }
3817                         if (ret)
3818                                 btrfs_abort_transaction(trans, root, ret);
3819                 }
3820
3821                 /* if it's not on the io list, we need to put the block group */
3822                 if (should_put)
3823                         btrfs_put_block_group(cache);
3824                 spin_lock(&cur_trans->dirty_bgs_lock);
3825         }
3826         spin_unlock(&cur_trans->dirty_bgs_lock);
3827
3828         while (!list_empty(io)) {
3829                 cache = list_first_entry(io, struct btrfs_block_group_cache,
3830                                          io_list);
3831                 list_del_init(&cache->io_list);
3832                 btrfs_wait_cache_io(root, trans, cache,
3833                                     &cache->io_ctl, path, cache->key.objectid);
3834                 btrfs_put_block_group(cache);
3835         }
3836
3837         btrfs_free_path(path);
3838         return ret;
3839 }
3840
3841 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3842 {
3843         struct btrfs_block_group_cache *block_group;
3844         int readonly = 0;
3845
3846         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3847         if (!block_group || block_group->ro)
3848                 readonly = 1;
3849         if (block_group)
3850                 btrfs_put_block_group(block_group);
3851         return readonly;
3852 }
3853
3854 bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
3855 {
3856         struct btrfs_block_group_cache *bg;
3857         bool ret = true;
3858
3859         bg = btrfs_lookup_block_group(fs_info, bytenr);
3860         if (!bg)
3861                 return false;
3862
3863         spin_lock(&bg->lock);
3864         if (bg->ro)
3865                 ret = false;
3866         else
3867                 atomic_inc(&bg->nocow_writers);
3868         spin_unlock(&bg->lock);
3869
3870         /* no put on block group, done by btrfs_dec_nocow_writers */
3871         if (!ret)
3872                 btrfs_put_block_group(bg);
3873
3874         return ret;
3875
3877
3878 void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
3879 {
3880         struct btrfs_block_group_cache *bg;
3881
3882         bg = btrfs_lookup_block_group(fs_info, bytenr);
3883         ASSERT(bg);
3884         if (atomic_dec_and_test(&bg->nocow_writers))
3885                 wake_up_atomic_t(&bg->nocow_writers);
3886         /*
3887          * Once for our lookup and once for the lookup done by a previous call
3888          * to btrfs_inc_nocow_writers()
3889          */
3890         btrfs_put_block_group(bg);
3891         btrfs_put_block_group(bg);
3892 }
3893
3894 static int btrfs_wait_nocow_writers_atomic_t(atomic_t *a)
3895 {
3896         schedule();
3897         return 0;
3898 }
3899
3900 void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
3901 {
3902         wait_on_atomic_t(&bg->nocow_writers,
3903                          btrfs_wait_nocow_writers_atomic_t,
3904                          TASK_UNINTERRUPTIBLE);
3905 }
3906
3907 static const char *alloc_name(u64 flags)
3908 {
3909         switch (flags) {
3910         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3911                 return "mixed";
3912         case BTRFS_BLOCK_GROUP_METADATA:
3913                 return "metadata";
3914         case BTRFS_BLOCK_GROUP_DATA:
3915                 return "data";
3916         case BTRFS_BLOCK_GROUP_SYSTEM:
3917                 return "system";
3918         default:
3919                 WARN_ON(1);
3920                 return "invalid-combination";
3921         }
3922 }
3923
3924 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3925                              u64 total_bytes, u64 bytes_used,
3926                              u64 bytes_readonly,
3927                              struct btrfs_space_info **space_info)
3928 {
3929         struct btrfs_space_info *found;
3930         int i;
3931         int factor;
3932         int ret;
3933
3934         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3935                      BTRFS_BLOCK_GROUP_RAID10))
3936                 factor = 2;
3937         else
3938                 factor = 1;
3939
3940         found = __find_space_info(info, flags);
3941         if (found) {
3942                 spin_lock(&found->lock);
3943                 found->total_bytes += total_bytes;
3944                 found->disk_total += total_bytes * factor;
3945                 found->bytes_used += bytes_used;
3946                 found->disk_used += bytes_used * factor;
3947                 found->bytes_readonly += bytes_readonly;
3948                 if (total_bytes > 0)
3949                         found->full = 0;
3950                 space_info_add_new_bytes(info, found, total_bytes -
3951                                          bytes_used - bytes_readonly);
3952                 spin_unlock(&found->lock);
3953                 *space_info = found;
3954                 return 0;
3955         }
3956         found = kzalloc(sizeof(*found), GFP_NOFS);
3957         if (!found)
3958                 return -ENOMEM;
3959
3960         ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
3961         if (ret) {
3962                 kfree(found);
3963                 return ret;
3964         }
3965
3966         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3967                 INIT_LIST_HEAD(&found->block_groups[i]);
3968         init_rwsem(&found->groups_sem);
3969         spin_lock_init(&found->lock);
3970         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3971         found->total_bytes = total_bytes;
3972         found->disk_total = total_bytes * factor;
3973         found->bytes_used = bytes_used;
3974         found->disk_used = bytes_used * factor;
3975         found->bytes_pinned = 0;
3976         found->bytes_reserved = 0;
3977         found->bytes_readonly = bytes_readonly;
3978         found->bytes_may_use = 0;
3979         found->full = 0;
3980         found->max_extent_size = 0;
3981         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3982         found->chunk_alloc = 0;
3983         found->flush = 0;
3984         init_waitqueue_head(&found->wait);
3985         INIT_LIST_HEAD(&found->ro_bgs);
3986         INIT_LIST_HEAD(&found->tickets);
3987         INIT_LIST_HEAD(&found->priority_tickets);
3988
3989         ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3990                                     info->space_info_kobj, "%s",
3991                                     alloc_name(found->flags));
3992         if (ret) {
3993                 kfree(found);
3994                 return ret;
3995         }
3996
3997         *space_info = found;
3998         list_add_rcu(&found->list, &info->space_info);
3999         if (flags & BTRFS_BLOCK_GROUP_DATA)
4000                 info->data_sinfo = found;
4001
4002         return ret;
4003 }
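
/*
 * Worked example for the factor accounting above (illustrative numbers,
 * not taken from any real filesystem): adding a 1GiB RAID1 block group
 * with 256MiB used gives
 *
 *      total_bytes += 1GiB;    disk_total += 2GiB;     (factor == 2)
 *      bytes_used  += 256MiB;  disk_used  += 512MiB;
 *
 * i.e. the plain counters track logical space while the disk_* counters
 * track the raw bytes consumed on disk for DUP/RAID1/RAID10 profiles.
 */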
4004
4005 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
4006 {
4007         u64 extra_flags = chunk_to_extended(flags) &
4008                                 BTRFS_EXTENDED_PROFILE_MASK;
4009
4010         write_seqlock(&fs_info->profiles_lock);
4011         if (flags & BTRFS_BLOCK_GROUP_DATA)
4012                 fs_info->avail_data_alloc_bits |= extra_flags;
4013         if (flags & BTRFS_BLOCK_GROUP_METADATA)
4014                 fs_info->avail_metadata_alloc_bits |= extra_flags;
4015         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
4016                 fs_info->avail_system_alloc_bits |= extra_flags;
4017         write_sequnlock(&fs_info->profiles_lock);
4018 }
4019
4020 /*
4021  * returns target flags in extended format or 0 if restripe for this
4022  * chunk_type is not in progress
4023  *
4024  * should be called with either volume_mutex or balance_lock held
4025  */
4026 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
4027 {
4028         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4029         u64 target = 0;
4030
4031         if (!bctl)
4032                 return 0;
4033
4034         if (flags & BTRFS_BLOCK_GROUP_DATA &&
4035             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
4036                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
4037         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
4038                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
4039                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
4040         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
4041                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
4042                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
4043         }
4044
4045         return target;
4046 }
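
/*
 * Example (hypothetical balance operation): while a convert of data
 * chunks to RAID1 is running, bctl->data.flags has
 * BTRFS_BALANCE_ARGS_CONVERT set, so a query for BTRFS_BLOCK_GROUP_DATA
 * returns BTRFS_BLOCK_GROUP_DATA | bctl->data.target, while metadata and
 * system queries still return 0.
 */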
4047
4048 /*
4049  * @flags: available profiles in extended format (see ctree.h)
4050  *
4051  * Returns reduced profile in chunk format.  If profile changing is in
4052  * progress (either running or paused) picks the target profile (if it's
4053  * already available), otherwise falls back to plain reducing.
4054  */
4055 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
4056 {
4057         u64 num_devices = root->fs_info->fs_devices->rw_devices;
4058         u64 target;
4059         u64 raid_type;
4060         u64 allowed = 0;
4061
4062         /*
4063          * See if a restripe for this chunk_type is in progress; if so,
4064          * try to reduce to the target profile.
4065          */
4066         spin_lock(&root->fs_info->balance_lock);
4067         target = get_restripe_target(root->fs_info, flags);
4068         if (target) {
4069                 /* pick target profile only if it's already available */
4070                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
4071                         spin_unlock(&root->fs_info->balance_lock);
4072                         return extended_to_chunk(target);
4073                 }
4074         }
4075         spin_unlock(&root->fs_info->balance_lock);
4076
4077         /* First, mask out the RAID levels which aren't possible */
4078         for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
4079                 if (num_devices >= btrfs_raid_array[raid_type].devs_min)
4080                         allowed |= btrfs_raid_group[raid_type];
4081         }
4082         allowed &= flags;
4083
4084         if (allowed & BTRFS_BLOCK_GROUP_RAID6)
4085                 allowed = BTRFS_BLOCK_GROUP_RAID6;
4086         else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
4087                 allowed = BTRFS_BLOCK_GROUP_RAID5;
4088         else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
4089                 allowed = BTRFS_BLOCK_GROUP_RAID10;
4090         else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
4091                 allowed = BTRFS_BLOCK_GROUP_RAID1;
4092         else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
4093                 allowed = BTRFS_BLOCK_GROUP_RAID0;
4094
4095         flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
4096
4097         return extended_to_chunk(flags | allowed);
4098 }
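
/*
 * Illustrative walk-through, assuming two rw devices: for
 * flags = DATA | RAID0 | RAID1 both profiles survive the devs_min mask,
 * the if/else ladder keeps only the highest-priority profile present
 * (RAID6 > RAID5 > RAID10 > RAID1 > RAID0, so RAID1 here), and the
 * function returns extended_to_chunk(DATA | RAID1).
 */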
4099
4100 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
4101 {
4102         unsigned seq;
4103         u64 flags;
4104
4105         do {
4106                 flags = orig_flags;
4107                 seq = read_seqbegin(&root->fs_info->profiles_lock);
4108
4109                 if (flags & BTRFS_BLOCK_GROUP_DATA)
4110                         flags |= root->fs_info->avail_data_alloc_bits;
4111                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
4112                         flags |= root->fs_info->avail_system_alloc_bits;
4113                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
4114                         flags |= root->fs_info->avail_metadata_alloc_bits;
4115         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
4116
4117         return btrfs_reduce_alloc_profile(root, flags);
4118 }
4119
4120 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
4121 {
4122         u64 flags;
4123         u64 ret;
4124
4125         if (data)
4126                 flags = BTRFS_BLOCK_GROUP_DATA;
4127         else if (root == root->fs_info->chunk_root)
4128                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
4129         else
4130                 flags = BTRFS_BLOCK_GROUP_METADATA;
4131
4132         ret = get_alloc_profile(root, flags);
4133         return ret;
4134 }
4135
4136 int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes)
4137 {
4138         struct btrfs_space_info *data_sinfo;
4139         struct btrfs_root *root = BTRFS_I(inode)->root;
4140         struct btrfs_fs_info *fs_info = root->fs_info;
4141         u64 used;
4142         int ret = 0;
4143         int need_commit = 2;
4144         int have_pinned_space;
4145
4146         /* make sure bytes are sectorsize aligned */
4147         bytes = ALIGN(bytes, root->sectorsize);
4148
4149         if (btrfs_is_free_space_inode(inode)) {
4150                 need_commit = 0;
4151                 ASSERT(current->journal_info);
4152         }
4153
4154         data_sinfo = fs_info->data_sinfo;
4155         if (!data_sinfo)
4156                 goto alloc;
4157
4158 again:
4159         /* make sure we have enough space to handle the data first */
4160         spin_lock(&data_sinfo->lock);
4161         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
4162                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
4163                 data_sinfo->bytes_may_use;
4164
4165         if (used + bytes > data_sinfo->total_bytes) {
4166                 struct btrfs_trans_handle *trans;
4167
4168                 /*
4169                  * If we don't have enough free bytes in this space then we need
4170                  * to allocate a new chunk.
4171                  */
4172                 if (!data_sinfo->full) {
4173                         u64 alloc_target;
4174
4175                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
4176                         spin_unlock(&data_sinfo->lock);
4177 alloc:
4178                         alloc_target = btrfs_get_alloc_profile(root, 1);
4179                         /*
4180                          * It is ugly that we don't call a nolock join
4181                          * transaction for the free space inode case here,
4182                          * but it is safe: we only reserve data space for
4183                          * the free space cache from within a transaction
4184                          * context, and the common join transaction merely
4185                          * increments the use count of the current
4186                          * transaction handle without trying to acquire
4187                          * the fs-wide trans_lock.
4188                          */
4189                         trans = btrfs_join_transaction(root);
4190                         if (IS_ERR(trans))
4191                                 return PTR_ERR(trans);
4192
4193                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4194                                              alloc_target,
4195                                              CHUNK_ALLOC_NO_FORCE);
4196                         btrfs_end_transaction(trans, root);
4197                         if (ret < 0) {
4198                                 if (ret != -ENOSPC)
4199                                         return ret;
4200                                 else {
4201                                         have_pinned_space = 1;
4202                                         goto commit_trans;
4203                                 }
4204                         }
4205
4206                         if (!data_sinfo)
4207                                 data_sinfo = fs_info->data_sinfo;
4208
4209                         goto again;
4210                 }
4211
4212                 /*
4213                  * If we don't have enough pinned space to deal with this
4214                  * allocation, and no chunk was removed in the current
4215                  * transaction, don't bother committing the transaction.
4216                  */
4217                 have_pinned_space = percpu_counter_compare(
4218                         &data_sinfo->total_bytes_pinned,
4219                         used + bytes - data_sinfo->total_bytes);
4220                 spin_unlock(&data_sinfo->lock);
4221
4222                 /* commit the current transaction and try again */
4223 commit_trans:
4224                 if (need_commit &&
4225                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
4226                         need_commit--;
4227
4228                         if (need_commit > 0) {
4229                                 btrfs_start_delalloc_roots(fs_info, 0, -1);
4230                                 btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
4231                         }
4232
4233                         trans = btrfs_join_transaction(root);
4234                         if (IS_ERR(trans))
4235                                 return PTR_ERR(trans);
4236                         if (have_pinned_space >= 0 ||
4237                             test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
4238                                      &trans->transaction->flags) ||
4239                             need_commit > 0) {
4240                                 ret = btrfs_commit_transaction(trans, root);
4241                                 if (ret)
4242                                         return ret;
4243                                 /*
4244                                  * The cleaner kthread might still be doing iput
4245                                  * operations. Wait for it to finish so that
4246                                  * more space is released.
4247                                  */
4248                                 mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
4249                                 mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
4250                                 goto again;
4251                         } else {
4252                                 btrfs_end_transaction(trans, root);
4253                         }
4254                 }
4255
4256                 trace_btrfs_space_reservation(root->fs_info,
4257                                               "space_info:enospc",
4258                                               data_sinfo->flags, bytes, 1);
4259                 return -ENOSPC;
4260         }
4261         data_sinfo->bytes_may_use += bytes;
4262         trace_btrfs_space_reservation(root->fs_info, "space_info",
4263                                       data_sinfo->flags, bytes, 1);
4264         spin_unlock(&data_sinfo->lock);
4265
4266         return ret;
4267 }
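
/*
 * Sketch of the flow above, not an exhaustive trace: "used" sums
 * bytes_used, bytes_reserved, bytes_pinned, bytes_readonly and
 * bytes_may_use.  If used + bytes fits within total_bytes we simply bump
 * bytes_may_use; otherwise we first try to allocate a new data chunk and,
 * failing that, commit the transaction up to two times (flushing delalloc
 * and waiting on ordered extents before the first commit) before finally
 * returning -ENOSPC.
 */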
4268
4269 /*
4270  * New check_data_free_space() with the ability to do precise data reservation
4271  * Will replace the old btrfs_check_data_free_space(); to keep the patch split
4272  * manageable, add the new function first and then replace the old one.
4273  */
4274 int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
4275 {
4276         struct btrfs_root *root = BTRFS_I(inode)->root;
4277         int ret;
4278
4279         /* align the range */
4280         len = round_up(start + len, root->sectorsize) -
4281               round_down(start, root->sectorsize);
4282         start = round_down(start, root->sectorsize);
4283
4284         ret = btrfs_alloc_data_chunk_ondemand(inode, len);
4285         if (ret < 0)
4286                 return ret;
4287
4288         /*
4289          * Use the new btrfs_qgroup_reserve_data() to reserve precise data space
4290          *
4291          * TODO: Find a good way to avoid reserving data space for NOCOW
4292          * ranges without hurting performance when quotas are disabled.
4293          */
4294         ret = btrfs_qgroup_reserve_data(inode, start, len);
4295         return ret;
4296 }
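
/*
 * Example of the alignment math above (hypothetical values, 4KiB
 * sectorsize): for start = 1000 and len = 3000,
 *
 *      len   = round_up(4000, 4096) - round_down(1000, 4096) = 4096;
 *      start = round_down(1000, 4096) = 0;
 *
 * so the reservation always covers whole sectors around the range.
 */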
4297
4298 /*
4299  * Called if we need to clear a data reservation for this inode
4300  * Normally in an error case.
4301  *
4302  * This one will *NOT* use the accurate qgroup reserved space API; it is
4303  * only for callers that cannot sleep and are sure the qgroup reserved
4304  * space will not be affected, like clear_bit_hook().
4305  */
4306 void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
4307                                             u64 len)
4308 {
4309         struct btrfs_root *root = BTRFS_I(inode)->root;
4310         struct btrfs_space_info *data_sinfo;
4311
4312         /* Make sure the range is aligned to sectorsize */
4313         len = round_up(start + len, root->sectorsize) -
4314               round_down(start, root->sectorsize);
4315         start = round_down(start, root->sectorsize);
4316
4317         data_sinfo = root->fs_info->data_sinfo;
4318         spin_lock(&data_sinfo->lock);
4319         if (WARN_ON(data_sinfo->bytes_may_use < len))
4320                 data_sinfo->bytes_may_use = 0;
4321         else
4322                 data_sinfo->bytes_may_use -= len;
4323         trace_btrfs_space_reservation(root->fs_info, "space_info",
4324                                       data_sinfo->flags, len, 0);
4325         spin_unlock(&data_sinfo->lock);
4326 }
4327
4328 /*
4329  * Called if we need to clear a data reservation for this inode
4330  * Normally in an error case.
4331  *
4332  * This one also updates the per-inode data rsv map of the accurate
4333  * reserved space framework.
4334  */
4335 void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
4336 {
4337         btrfs_free_reserved_data_space_noquota(inode, start, len);
4338         btrfs_qgroup_free_data(inode, start, len);
4339 }
4340
4341 static void force_metadata_allocation(struct btrfs_fs_info *info)
4342 {
4343         struct list_head *head = &info->space_info;
4344         struct btrfs_space_info *found;
4345
4346         rcu_read_lock();
4347         list_for_each_entry_rcu(found, head, list) {
4348                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
4349                         found->force_alloc = CHUNK_ALLOC_FORCE;
4350         }
4351         rcu_read_unlock();
4352 }
4353
4354 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
4355 {
4356         return (global->size << 1);
4357 }
4358
4359 static int should_alloc_chunk(struct btrfs_root *root,
4360                               struct btrfs_space_info *sinfo, int force)
4361 {
4362         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4363         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
4364         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
4365         u64 thresh;
4366
4367         if (force == CHUNK_ALLOC_FORCE)
4368                 return 1;
4369
4370         /*
4371          * We need to take into account the global rsv because for all intents
4372          * and purposes it's used space.  Don't worry about locking the
4373          * global_rsv, it doesn't change except when the transaction commits.
4374          */
4375         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
4376                 num_allocated += calc_global_rsv_need_space(global_rsv);
4377
4378         /*
4379          * in limited mode, we want to have some free space up to
4380          * about 1% of the FS size.
4381          */
4382         if (force == CHUNK_ALLOC_LIMITED) {
4383                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
4384                 thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
4385
4386                 if (num_bytes - num_allocated < thresh)
4387                         return 1;
4388         }
4389
4390         if (num_allocated + SZ_2M < div_factor(num_bytes, 8))
4391                 return 0;
4392         return 1;
4393 }
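
/*
 * Rough numbers for the checks above (illustration only): on a 1TiB
 * filesystem in CHUNK_ALLOC_LIMITED mode the threshold is
 * max(64MiB, 1% of 1TiB) ~= 10GiB of free space, and in the unforced
 * case we only allocate once num_allocated + 2MiB reaches about 80% of
 * the space info's writable bytes (div_factor(num_bytes, 8)).
 */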
4394
4395 static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
4396 {
4397         u64 num_dev;
4398
4399         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
4400                     BTRFS_BLOCK_GROUP_RAID0 |
4401                     BTRFS_BLOCK_GROUP_RAID5 |
4402                     BTRFS_BLOCK_GROUP_RAID6))
4403                 num_dev = root->fs_info->fs_devices->rw_devices;
4404         else if (type & BTRFS_BLOCK_GROUP_RAID1)
4405                 num_dev = 2;
4406         else
4407                 num_dev = 1;    /* DUP or single */
4408
4409         return num_dev;
4410 }
4411
4412 /*
4413  * Reserve the space in the SYSTEM space info needed to update num_devs
4414  * device items and to add or remove one chunk item for a chunk of the
4415  * given @type.
4416  */
4417 void check_system_chunk(struct btrfs_trans_handle *trans,
4418                         struct btrfs_root *root,
4419                         u64 type)
4420 {
4421         struct btrfs_space_info *info;
4422         u64 left;
4423         u64 thresh;
4424         int ret = 0;
4425         u64 num_devs;
4426
4427         /*
4428          * Needed because we can end up allocating a system chunk, and we
4429          * need the chunk block reserve reservation to be atomic and race free.
4430          */
4431         ASSERT(mutex_is_locked(&root->fs_info->chunk_mutex));
4432
4433         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4434         spin_lock(&info->lock);
4435         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
4436                 info->bytes_reserved - info->bytes_readonly -
4437                 info->bytes_may_use;
4438         spin_unlock(&info->lock);
4439
4440         num_devs = get_profile_num_devs(root, type);
4441
4442         /* num_devs device items to update and 1 chunk item to add or remove */
4443         thresh = btrfs_calc_trunc_metadata_size(root, num_devs) +
4444                 btrfs_calc_trans_metadata_size(root, 1);
4445
4446         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
4447                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
4448                         left, thresh, type);
4449                 dump_space_info(info, 0, 0);
4450         }
4451
4452         if (left < thresh) {
4453                 u64 flags;
4454
4455                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
4456                 /*
4457                  * Ignore failure to create system chunk. We might end up not
4458                  * needing it, as we might not need to COW all nodes/leafs from
4459                  * the paths we visit in the chunk tree (they were already COWed
4460                  * or created in the current transaction for example).
4461                  */
4462                 ret = btrfs_alloc_chunk(trans, root, flags);
4463         }
4464
4465         if (!ret) {
4466                 ret = btrfs_block_rsv_add(root->fs_info->chunk_root,
4467                                           &root->fs_info->chunk_block_rsv,
4468                                           thresh, BTRFS_RESERVE_NO_FLUSH);
4469                 if (!ret)
4470                         trans->chunk_bytes_reserved += thresh;
4471         }
4472 }
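
/*
 * Hedged sketch of the reservation math above: with a RAID1 chunk type,
 * get_profile_num_devs() yields num_devs == 2, so thresh covers the
 * metadata needed to update two device items plus one chunk item.  If
 * the SYSTEM space info cannot cover that, a new system chunk is
 * allocated before the space is moved into the chunk block reserve.
 */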
4473
4474 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
4475                           struct btrfs_root *extent_root, u64 flags, int force)
4476 {
4477         struct btrfs_space_info *space_info;
4478         struct btrfs_fs_info *fs_info = extent_root->fs_info;
4479         int wait_for_alloc = 0;
4480         int ret = 0;
4481
4482         /* Don't re-enter if we're already allocating a chunk */
4483         if (trans->allocating_chunk)
4484                 return -ENOSPC;
4485
4486         space_info = __find_space_info(extent_root->fs_info, flags);
4487         if (!space_info) {
4488                 ret = update_space_info(extent_root->fs_info, flags,
4489                                         0, 0, 0, &space_info);
4490                 BUG_ON(ret); /* -ENOMEM */
4491         }
4492         BUG_ON(!space_info); /* Logic error */
4493
4494 again:
4495         spin_lock(&space_info->lock);
4496         if (force < space_info->force_alloc)
4497                 force = space_info->force_alloc;
4498         if (space_info->full) {
4499                 if (should_alloc_chunk(extent_root, space_info, force))
4500                         ret = -ENOSPC;
4501                 else
4502                         ret = 0;
4503                 spin_unlock(&space_info->lock);
4504                 return ret;
4505         }
4506
4507         if (!should_alloc_chunk(extent_root, space_info, force)) {
4508                 spin_unlock(&space_info->lock);
4509                 return 0;
4510         } else if (space_info->chunk_alloc) {
4511                 wait_for_alloc = 1;
4512         } else {
4513                 space_info->chunk_alloc = 1;
4514         }
4515
4516         spin_unlock(&space_info->lock);
4517
4518         mutex_lock(&fs_info->chunk_mutex);
4519
4520         /*
4521          * The chunk_mutex is held throughout the entirety of a chunk
4522          * allocation, so once we've acquired the chunk_mutex we know that the
4523          * other guy is done and we need to recheck and see if we should
4524          * allocate.
4525          */
4526         if (wait_for_alloc) {
4527                 mutex_unlock(&fs_info->chunk_mutex);
4528                 wait_for_alloc = 0;
4529                 goto again;
4530         }
4531
4532         trans->allocating_chunk = true;
4533
4534         /*
4535          * If we have mixed data/metadata chunks we want to make sure we keep
4536          * allocating mixed chunks instead of individual chunks.
4537          */
4538         if (btrfs_mixed_space_info(space_info))
4539                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
4540
4541         /*
4542          * if we're doing a data chunk, go ahead and make sure that
4543          * we keep a reasonable number of metadata chunks allocated in the
4544          * FS as well.
4545          */
4546         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
4547                 fs_info->data_chunk_allocations++;
4548                 if (!(fs_info->data_chunk_allocations %
4549                       fs_info->metadata_ratio))
4550                         force_metadata_allocation(fs_info);
4551         }
4552
4553         /*
4554          * Check if we have enough space in the SYSTEM chunk, because we may
4555          * need to update device items.
4556          */
4557         check_system_chunk(trans, extent_root, flags);
4558
4559         ret = btrfs_alloc_chunk(trans, extent_root, flags);
4560         trans->allocating_chunk = false;
4561
4562         spin_lock(&space_info->lock);
4563         if (ret < 0 && ret != -ENOSPC)
4564                 goto out;
4565         if (ret)
4566                 space_info->full = 1;
4567         else
4568                 ret = 1;
4569
4570         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4571 out:
4572         space_info->chunk_alloc = 0;
4573         spin_unlock(&space_info->lock);
4574         mutex_unlock(&fs_info->chunk_mutex);
4575         /*
4576          * When we allocate a new chunk we reserve space in the chunk block
4577          * reserve to make sure we can COW nodes/leafs in the chunk tree or
4578          * add new nodes/leafs to it if we end up needing to do it when
4579          * inserting the chunk item and updating device items as part of the
4580          * second phase of chunk allocation, performed by
4581          * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
4582          * large number of new block groups to create in our transaction
4583          * handle's new_bgs list to avoid exhausting the chunk block reserve
4584          * in extreme cases - like having a single transaction create many new
4585          * block groups when starting to write out the free space caches of all
4586          * the block groups that were made dirty during the lifetime of the
4587          * transaction.
4588          */
4589         if (trans->can_flush_pending_bgs &&
4590             trans->chunk_bytes_reserved >= (u64)SZ_2M) {
4591                 btrfs_create_pending_block_groups(trans, trans->root);
4592                 btrfs_trans_release_chunk_metadata(trans);
4593         }
4594         return ret;
4595 }
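
/*
 * Note on the force handling above: the CHUNK_ALLOC_* levels are ordered
 * (NO_FORCE < LIMITED < FORCE), so the "force < space_info->force_alloc"
 * check upgrades this call to the strongest level any earlier caller
 * recorded in space_info->force_alloc, e.g. a NO_FORCE caller that finds
 * FORCE pending behaves as if it had passed CHUNK_ALLOC_FORCE.
 */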
4596
4597 static int can_overcommit(struct btrfs_root *root,
4598                           struct btrfs_space_info *space_info, u64 bytes,
4599                           enum btrfs_reserve_flush_enum flush)
4600 {
4601         struct btrfs_block_rsv *global_rsv;
4602         u64 profile;
4603         u64 space_size;
4604         u64 avail;
4605         u64 used;
4606
4607         /* Don't overcommit when in mixed mode. */
4608         if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
4609                 return 0;
4610
4611         BUG_ON(root->fs_info == NULL);
4612         global_rsv = &root->fs_info->global_block_rsv;
4613         profile = btrfs_get_alloc_profile(root, 0);
4614         used = space_info->bytes_used + space_info->bytes_reserved +
4615                 space_info->bytes_pinned + space_info->bytes_readonly;
4616
4617         /*
4618          * We only want to allow over committing if we have lots of actual space
4619          * free, but if we don't have enough space to handle the global reserve
4620          * space then we could end up having a real enospc problem when trying
4621          * to allocate a chunk or some other such important allocation.
4622          */
4623         spin_lock(&global_rsv->lock);
4624         space_size = calc_global_rsv_need_space(global_rsv);
4625         spin_unlock(&global_rsv->lock);
4626         if (used + space_size >= space_info->total_bytes)
4627                 return 0;
4628
4629         used += space_info->bytes_may_use;
4630
4631         spin_lock(&root->fs_info->free_chunk_lock);
4632         avail = root->fs_info->free_chunk_space;
4633         spin_unlock(&root->fs_info->free_chunk_lock);
4634
4635         /*
4636          * If we have dup, raid1 or raid10 then only half of the free
4637          * space is actually usable.  For raid56, the space info used
4638          * doesn't include the parity drive, so we don't have to
4639          * change the math.
4640          */
4641         if (profile & (BTRFS_BLOCK_GROUP_DUP |
4642                        BTRFS_BLOCK_GROUP_RAID1 |
4643                        BTRFS_BLOCK_GROUP_RAID10))
4644                 avail >>= 1;
4645
4646         /*
4647          * If we aren't flushing all things, let us overcommit up to
4648          * half of the space. If we can flush, don't let us overcommit
4649          * too much; only allow overcommitting up to 1/8 of the space.
4650          */
4651         if (flush == BTRFS_RESERVE_FLUSH_ALL)
4652                 avail >>= 3;
4653         else
4654                 avail >>= 1;
4655
4656         if (used + bytes < space_info->total_bytes + avail)
4657                 return 1;
4658         return 0;
4659 }
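
/*
 * Worked example of the overcommit math (hypothetical sizes): with 80GiB
 * of unallocated device space and a RAID1 metadata profile, avail is
 * first halved to 40GiB (mirrored writes) and then cut to 1/8th (5GiB)
 * for BTRFS_RESERVE_FLUSH_ALL, so the reservation succeeds only while
 * used + bytes stays below total_bytes + 5GiB.
 */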
4660
4661 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
4662                                          unsigned long nr_pages, int nr_items)
4663 {
4664         struct super_block *sb = root->fs_info->sb;
4665
4666         if (down_read_trylock(&sb->s_umount)) {
4667                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4668                 up_read(&sb->s_umount);
4669         } else {
4670                 /*
4671                  * We needn't worry about the filesystem going from r/w to
4672                  * r/o even though we don't acquire the ->s_umount mutex,
4673                  * because the filesystem guarantees that the delalloc inode
4674                  * list is empty once the filesystem is read-only (all dirty
4675                  * pages have been written to disk).
4676                  */
4677                 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
4678                 if (!current->journal_info)
4679                         btrfs_wait_ordered_roots(root->fs_info, nr_items,
4680                                                  0, (u64)-1);
4681         }
4682 }
4683
4684 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
4685 {
4686         u64 bytes;
4687         int nr;
4688
4689         bytes = btrfs_calc_trans_metadata_size(root, 1);
4690         nr = (int)div64_u64(to_reclaim, bytes);
4691         if (!nr)
4692                 nr = 1;
4693         return nr;
4694 }
4695
4696 #define EXTENT_SIZE_PER_ITEM    SZ_256K
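
/*
 * For illustration (the real size depends on node size and tree height):
 * if btrfs_calc_trans_metadata_size(root, 1) came to 256KiB, then a 1MiB
 * to_reclaim would map to nr = 4 items, and shrink_delalloc() below
 * converts items back to bytes at EXTENT_SIZE_PER_ITEM per item.
 */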
4697
4698 /*
4699  * shrink metadata reservation for delalloc
4700  */
4701 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4702                             bool wait_ordered)
4703 {
4704         struct btrfs_block_rsv *block_rsv;
4705         struct btrfs_space_info *space_info;
4706         struct btrfs_trans_handle *trans;
4707         u64 delalloc_bytes;
4708         u64 max_reclaim;
4709         long time_left;
4710         unsigned long nr_pages;
4711         int loops;
4712         int items;
4713         enum btrfs_reserve_flush_enum flush;
4714
4715         /* Calculate the number of items we need to flush for this space reservation */
4716         items = calc_reclaim_items_nr(root, to_reclaim);
4717         to_reclaim = (u64)items * EXTENT_SIZE_PER_ITEM;
4718
4719         trans = (struct btrfs_trans_handle *)current->journal_info;
4720         block_rsv = &root->fs_info->delalloc_block_rsv;
4721         space_info = block_rsv->space_info;
4722
4723         delalloc_bytes = percpu_counter_sum_positive(
4724                                                 &root->fs_info->delalloc_bytes);
4725         if (delalloc_bytes == 0) {
4726                 if (trans)
4727                         return;
4728                 if (wait_ordered)
4729                         btrfs_wait_ordered_roots(root->fs_info, items,
4730                                                  0, (u64)-1);
4731                 return;
4732         }
4733
4734         loops = 0;
4735         while (delalloc_bytes && loops < 3) {
4736                 max_reclaim = min(delalloc_bytes, to_reclaim);
4737                 nr_pages = max_reclaim >> PAGE_SHIFT;
4738                 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4739                 /*
4740                  * We need to wait for the async pages to actually start before
4741                  * we do anything.
4742                  */
4743                 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4744                 if (!max_reclaim)
4745                         goto skip_async;
4746
4747                 if (max_reclaim <= nr_pages)
4748                         max_reclaim = 0;
4749                 else
4750                         max_reclaim -= nr_pages;
4751
4752                 wait_event(root->fs_info->async_submit_wait,
4753                            atomic_read(&root->fs_info->async_delalloc_pages) <=
4754                            (int)max_reclaim);
4755 skip_async:
4756                 if (!trans)
4757                         flush = BTRFS_RESERVE_FLUSH_ALL;
4758                 else
4759                         flush = BTRFS_RESERVE_NO_FLUSH;
4760                 spin_lock(&space_info->lock);
4761                 if (can_overcommit(root, space_info, orig, flush)) {
4762                         spin_unlock(&space_info->lock);
4763                         break;
4764                 }
4765                 if (list_empty(&space_info->tickets) &&
4766                     list_empty(&space_info->priority_tickets)) {
4767                         spin_unlock(&space_info->lock);
4768                         break;
4769                 }
4770                 spin_unlock(&space_info->lock);
4771
4772                 loops++;
4773                 if (wait_ordered && !trans) {
4774                         btrfs_wait_ordered_roots(root->fs_info, items,
4775                                                  0, (u64)-1);
4776                 } else {
4777                         time_left = schedule_timeout_killable(1);
4778                         if (time_left)
4779                                 break;
4780                 }
4781                 delalloc_bytes = percpu_counter_sum_positive(
4782                                                 &root->fs_info->delalloc_bytes);
4783         }
4784 }
4785
4786 /**
4787  * may_commit_transaction - possibly commit the transaction if it's OK to
4788  * @root - the root we're allocating for
4789  * @space_info - the space info we want to reserve from
4790  * @bytes - the number of bytes we want to reserve
4791  * @force - force the commit
4792  *
4793  * This checks whether committing the transaction will actually get us
4794  * somewhere and commits it if so; otherwise it returns -ENOSPC.
4795  */
4796 static int may_commit_transaction(struct btrfs_root *root,
4797                                   struct btrfs_space_info *space_info,
4798                                   u64 bytes, int force)
4799 {
4800         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4801         struct btrfs_trans_handle *trans;
4802
4803         trans = (struct btrfs_trans_handle *)current->journal_info;
4804         if (trans)
4805                 return -EAGAIN;
4806
4807         if (force)
4808                 goto commit;
4809
4810         /* See if there is enough pinned space to make this reservation */
4811         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4812                                    bytes) >= 0)
4813                 goto commit;
4814
4815         /*
4816          * See if there is some space in the delayed insertion reservation for
4817          * this reservation.
4818          */
4819         if (space_info != delayed_rsv->space_info)
4820                 return -ENOSPC;
4821
4822         spin_lock(&delayed_rsv->lock);
4823         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4824                                    bytes - delayed_rsv->size) >= 0) {
4825                 spin_unlock(&delayed_rsv->lock);
4826                 return -ENOSPC;
4827         }
4828         spin_unlock(&delayed_rsv->lock);
4829
4830 commit:
4831         trans = btrfs_join_transaction(root);
4832         if (IS_ERR(trans))
4833                 return -ENOSPC;
4834
4835         return btrfs_commit_transaction(trans, root);
4836 }
4837
4838 struct reserve_ticket {
4839         u64 bytes;
4840         int error;
4841         struct list_head list;
4842         wait_queue_head_t wait;
4843 };
4844
4845 static int flush_space(struct btrfs_root *root,
4846                        struct btrfs_space_info *space_info, u64 num_bytes,
4847                        u64 orig_bytes, int state)
4848 {
4849         struct btrfs_trans_handle *trans;
4850         int nr;
4851         int ret = 0;
4852
4853         switch (state) {
4854         case FLUSH_DELAYED_ITEMS_NR:
4855         case FLUSH_DELAYED_ITEMS:
4856                 if (state == FLUSH_DELAYED_ITEMS_NR)
4857                         nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4858                 else
4859                         nr = -1;
4860
4861                 trans = btrfs_join_transaction(root);
4862                 if (IS_ERR(trans)) {
4863                         ret = PTR_ERR(trans);
4864                         break;
4865                 }
4866                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4867                 btrfs_end_transaction(trans, root);
4868                 break;
4869         case FLUSH_DELALLOC:
4870         case FLUSH_DELALLOC_WAIT:
4871                 shrink_delalloc(root, num_bytes * 2, orig_bytes,
4872                                 state == FLUSH_DELALLOC_WAIT);
4873                 break;
4874         case ALLOC_CHUNK:
4875                 trans = btrfs_join_transaction(root);
4876                 if (IS_ERR(trans)) {
4877                         ret = PTR_ERR(trans);
4878                         break;
4879                 }
4880                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4881                                      btrfs_get_alloc_profile(root, 0),
4882                                      CHUNK_ALLOC_NO_FORCE);
4883                 btrfs_end_transaction(trans, root);
4884                 if (ret == -ENOSPC)
4885                         ret = 0;
4886                 break;
4887         case COMMIT_TRANS:
4888                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4889                 break;
4890         default:
4891                 ret = -ENOSPC;
4892                 break;
4893         }
4894
4895         trace_btrfs_flush_space(root->fs_info, space_info->flags, num_bytes,
4896                                 orig_bytes, state, ret);
4897         return ret;
4898 }
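
/*
 * The flush states used above escalate from cheapest to most expensive:
 * a bounded batch of delayed items, all delayed items, delalloc (with
 * and without waiting on ordered extents), chunk allocation, and finally
 * a transaction commit.  The reclaim loops below walk this ladder one
 * state at a time until enough space has been freed.
 */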
4899
4900 static inline u64
4901 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4902                                  struct btrfs_space_info *space_info)
4903 {
4904         struct reserve_ticket *ticket;
4905         u64 used;
4906         u64 expected;
4907         u64 to_reclaim = 0;
4908
4909         to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
4910         if (can_overcommit(root, space_info, to_reclaim,
4911                            BTRFS_RESERVE_FLUSH_ALL))
4912                 return 0;
4913
4914         list_for_each_entry(ticket, &space_info->tickets, list)
4915                 to_reclaim += ticket->bytes;
4916         list_for_each_entry(ticket, &space_info->priority_tickets, list)
4917                 to_reclaim += ticket->bytes;
4918         if (to_reclaim)
4919                 return to_reclaim;
4920
4921         used = space_info->bytes_used + space_info->bytes_reserved +
4922                space_info->bytes_pinned + space_info->bytes_readonly +
4923                space_info->bytes_may_use;
4924         if (can_overcommit(root, space_info, SZ_1M, BTRFS_RESERVE_FLUSH_ALL))
4925                 expected = div_factor_fine(space_info->total_bytes, 95);
4926         else
4927                 expected = div_factor_fine(space_info->total_bytes, 90);
4928
4929         if (used > expected)
4930                 to_reclaim = used - expected;
4931         else
4932                 to_reclaim = 0;
4933         to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4934                                      space_info->bytes_reserved);
4935         return to_reclaim;
4936 }
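
/*
 * Example of the fallback heuristic above (illustrative numbers): with
 * no tickets outstanding on a 100GiB space info that cannot overcommit
 * 1MiB, expected = div_factor_fine(total_bytes, 90) = 90GiB, so 95GiB of
 * used space yields to_reclaim = 5GiB, clamped to what is actually
 * reclaimable (bytes_may_use + bytes_reserved).
 */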
4937
4938 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4939                                         struct btrfs_root *root, u64 used)
4940 {
4941         u64 thresh = div_factor_fine(space_info->total_bytes, 98);
4942
4943         /* If we're just plain full then async reclaim just slows us down. */
4944         if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
4945                 return 0;
4946
4947         if (!btrfs_calc_reclaim_metadata_size(root, space_info))
4948                 return 0;
4949
4950         return (used >= thresh && !btrfs_fs_closing(root->fs_info) &&
4951                 !test_bit(BTRFS_FS_STATE_REMOUNTING,
4952                           &root->fs_info->fs_state));
4953 }
4954
4955 static void wake_all_tickets(struct list_head *head)
4956 {
4957         struct reserve_ticket *ticket;
4958
4959         while (!list_empty(head)) {
4960                 ticket = list_first_entry(head, struct reserve_ticket, list);
4961                 list_del_init(&ticket->list);
4962                 ticket->error = -ENOSPC;
4963                 wake_up(&ticket->wait);
4964         }
4965 }
4966
4967 /*
4968  * This is for normal flushers, we can wait all goddamned day if we want to.  We
4969  * will loop and continuously try to flush as long as we are making progress.
4970  * We count progress as clearing off tickets each time we have to loop.
4971  */
4972 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4973 {
4974         struct reserve_ticket *last_ticket = NULL;
4975         struct btrfs_fs_info *fs_info;
4976         struct btrfs_space_info *space_info;
4977         u64 to_reclaim;
4978         int flush_state;
4979         int commit_cycles = 0;
4980
4981         fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4982         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4983
4984         spin_lock(&space_info->lock);
4985         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4986                                                       space_info);
4987         if (!to_reclaim) {
4988                 space_info->flush = 0;
4989                 spin_unlock(&space_info->lock);
4990                 return;
4991         }
4992         last_ticket = list_first_entry(&space_info->tickets,
4993                                        struct reserve_ticket, list);
4994         spin_unlock(&space_info->lock);
4995
4996         flush_state = FLUSH_DELAYED_ITEMS_NR;
4997         do {
4998                 struct reserve_ticket *ticket;
4999                 int ret;
5000
5001                 ret = flush_space(fs_info->fs_root, space_info, to_reclaim,
5002                             to_reclaim, flush_state);
5003                 spin_lock(&space_info->lock);
5004                 if (list_empty(&space_info->tickets)) {
5005                         space_info->flush = 0;
5006                         spin_unlock(&space_info->lock);
5007                         return;
5008                 }
5009                 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
5010                                                               space_info);
5011                 ticket = list_first_entry(&space_info->tickets,
5012                                           struct reserve_ticket, list);
5013                 if (last_ticket == ticket) {
5014                         flush_state++;
5015                 } else {
5016                         last_ticket = ticket;
5017                         flush_state = FLUSH_DELAYED_ITEMS_NR;
5018                         if (commit_cycles)
5019                                 commit_cycles--;
5020                 }
5021
5022                 if (flush_state > COMMIT_TRANS) {
5023                         commit_cycles++;
5024                         if (commit_cycles > 2) {
5025                                 wake_all_tickets(&space_info->tickets);
5026                                 space_info->flush = 0;
5027                         } else {
5028                                 flush_state = FLUSH_DELAYED_ITEMS_NR;
5029                         }
5030                 }
5031                 spin_unlock(&space_info->lock);
5032         } while (flush_state <= COMMIT_TRANS);
5033 }
5034
5035 void btrfs_init_async_reclaim_work(struct work_struct *work)
5036 {
5037         INIT_WORK(work, btrfs_async_reclaim_metadata_space);
5038 }
5039
5040 static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
5041                                             struct btrfs_space_info *space_info,
5042                                             struct reserve_ticket *ticket)
5043 {
5044         u64 to_reclaim;
5045         int flush_state = FLUSH_DELAYED_ITEMS_NR;
5046
5047         spin_lock(&space_info->lock);
5048         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
5049                                                       space_info);
5050         if (!to_reclaim) {
5051                 spin_unlock(&space_info->lock);
5052                 return;
5053         }
5054         spin_unlock(&space_info->lock);
5055
5056         do {
5057                 flush_space(fs_info->fs_root, space_info, to_reclaim,
5058                             to_reclaim, flush_state);
5059                 flush_state++;
5060                 spin_lock(&space_info->lock);
5061                 if (ticket->bytes == 0) {
5062                         spin_unlock(&space_info->lock);
5063                         return;
5064                 }
5065                 spin_unlock(&space_info->lock);
5066
5067                 /*
5068                  * Priority flushers can't wait on delalloc without
5069                  * deadlocking.
5070                  */
5071                 if (flush_state == FLUSH_DELALLOC ||
5072                     flush_state == FLUSH_DELALLOC_WAIT)
5073                         flush_state = ALLOC_CHUNK;
5074         } while (flush_state < COMMIT_TRANS);
5075 }
5076
5077 static int wait_reserve_ticket(struct btrfs_fs_info *fs_info,
5078                                struct btrfs_space_info *space_info,
5079                                struct reserve_ticket *ticket, u64 orig_bytes)
5080 {
5082         DEFINE_WAIT(wait);
5083         int ret = 0;
5084
5085         spin_lock(&space_info->lock);
5086         while (ticket->bytes > 0 && ticket->error == 0) {
5087                 ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
5088                 if (ret) {
5089                         ret = -EINTR;
5090                         break;
5091                 }
5092                 spin_unlock(&space_info->lock);
5093
5094                 schedule();
5095
5096                 finish_wait(&ticket->wait, &wait);
5097                 spin_lock(&space_info->lock);
5098         }
5099         if (!ret)
5100                 ret = ticket->error;
5101         if (!list_empty(&ticket->list))
5102                 list_del_init(&ticket->list);
5103         if (ticket->bytes && ticket->bytes < orig_bytes) {
5104                 u64 num_bytes = orig_bytes - ticket->bytes;
5105                 space_info->bytes_may_use -= num_bytes;
5106                 trace_btrfs_space_reservation(fs_info, "space_info",
5107                                               space_info->flags, num_bytes, 0);
5108         }
5109         spin_unlock(&space_info->lock);
5110
5111         return ret;
5112 }
5113
5114 /**
5115  * __reserve_metadata_bytes - try to reserve bytes from a space_info
5116  * @root - the root we're allocating for
5117  * @space_info - the space info we want to allocate from
5118  * @orig_bytes - the number of bytes we want
5119  * @flush - whether or not we can flush to make our reservation
5120  *
5121  * This will reserve orig_bytes number of bytes from the given space info.
5122  * If there is not enough space it will make an attempt to flush out space
5123  * to make room.  It will do this by flushing delalloc if possible or
5124  * committing the transaction.  If flush is BTRFS_RESERVE_NO_FLUSH then no
5125  * attempts to regain reservations will be made and this will fail if there
5126  * is not enough space already.
5127  */
5128 static int __reserve_metadata_bytes(struct btrfs_root *root,
5129                                     struct btrfs_space_info *space_info,
5130                                     u64 orig_bytes,
5131                                     enum btrfs_reserve_flush_enum flush)
5132 {
5133         struct reserve_ticket ticket;
5134         u64 used;
5135         int ret = 0;
5136
5137         ASSERT(orig_bytes);
5138         ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);
5139
5140         spin_lock(&space_info->lock);
5141         ret = -ENOSPC;
5142         used = space_info->bytes_used + space_info->bytes_reserved +
5143                 space_info->bytes_pinned + space_info->bytes_readonly +
5144                 space_info->bytes_may_use;
5145
5146         /*
5147          * If we have enough space then hooray, make our reservation and carry
5148          * on.  If not, see if we can overcommit, and if we can, hooray, carry
5149          * on.  If not, things get more complicated.
5150          */
5151         if (used + orig_bytes <= space_info->total_bytes) {
5152                 space_info->bytes_may_use += orig_bytes;
5153                 trace_btrfs_space_reservation(root->fs_info, "space_info",
5154                                               space_info->flags, orig_bytes,
5155                                               1);
5156                 ret = 0;
5157         } else if (can_overcommit(root, space_info, orig_bytes, flush)) {
5158                 space_info->bytes_may_use += orig_bytes;
5159                 trace_btrfs_space_reservation(root->fs_info, "space_info",
5160                                               space_info->flags, orig_bytes,
5161                                               1);
5162                 ret = 0;
5163         }
5164
5165         /*
5166          * If we couldn't make a reservation then setup our reservation ticket
5167          * and kick the async worker if it's not already running.
5168          *
5169          * If we are a priority flusher then we just need to add our ticket to
5170          * the list and we will do our own flushing further down.
5171          */
5172         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
5173                 ticket.bytes = orig_bytes;
5174                 ticket.error = 0;
5175                 init_waitqueue_head(&ticket.wait);
5176                 if (flush == BTRFS_RESERVE_FLUSH_ALL) {
5177                         list_add_tail(&ticket.list, &space_info->tickets);
5178                         if (!space_info->flush) {
5179                                 space_info->flush = 1;
5180                                 trace_btrfs_trigger_flush(root->fs_info,
5181                                                           space_info->flags,
5182                                                           orig_bytes, flush,
5183                                                           "enospc");
5184                                 queue_work(system_unbound_wq,
5185                                            &root->fs_info->async_reclaim_work);
5186                         }
5187                 } else {
5188                         list_add_tail(&ticket.list,
5189                                       &space_info->priority_tickets);
5190                 }
5191         } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
5192                 used += orig_bytes;
5193                 /*
5194                  * We will do the space reservation dance during log replay,
5195                  * which means we won't have fs_info->fs_root set, so don't do
5196                  * the async reclaim as we will panic.
5197                  */
5198                 if (!root->fs_info->log_root_recovering &&
5199                     need_do_async_reclaim(space_info, root, used) &&
5200                     !work_busy(&root->fs_info->async_reclaim_work)) {
5201                         trace_btrfs_trigger_flush(root->fs_info,
5202                                                   space_info->flags,
5203                                                   orig_bytes, flush,
5204                                                   "preempt");
5205                         queue_work(system_unbound_wq,
5206                                    &root->fs_info->async_reclaim_work);
5207                 }
5208         }
5209         spin_unlock(&space_info->lock);
5210         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
5211                 return ret;
5212
5213         if (flush == BTRFS_RESERVE_FLUSH_ALL)
5214                 return wait_reserve_ticket(root->fs_info, space_info, &ticket,
5215                                            orig_bytes);
5216
5217         ret = 0;
5218         priority_reclaim_metadata_space(root->fs_info, space_info, &ticket);
5219         spin_lock(&space_info->lock);
5220         if (ticket.bytes) {
5221                 if (ticket.bytes < orig_bytes) {
5222                         u64 num_bytes = orig_bytes - ticket.bytes;
5223                         space_info->bytes_may_use -= num_bytes;
5224                         trace_btrfs_space_reservation(root->fs_info,
5225                                         "space_info", space_info->flags,
5226                                         num_bytes, 0);
5227
5228                 }
5229                 list_del_init(&ticket.list);
5230                 ret = -ENOSPC;
5231         }
5232         spin_unlock(&space_info->lock);
5233         ASSERT(list_empty(&ticket.list));
5234         return ret;
5235 }
5236
5237 /**
5238  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
5239  * @root - the root we're allocating for
5240  * @block_rsv - the block_rsv we're allocating for
5241  * @orig_bytes - the number of bytes we want
5242  * @flush - whether or not we can flush to make our reservation
5243  *
5244  * This will reserve orig_bytes number of bytes from the space info associated
5245  * with the block_rsv.  If there is not enough space it will make an attempt to
5246  * flush out space to make room.  It will do this by flushing delalloc if
5247  * possible or committing the transaction.  If flush is 0 then no attempts to
5248  * regain reservations will be made and this will fail if there is not enough
5249  * space already.
5250  */
5251 static int reserve_metadata_bytes(struct btrfs_root *root,
5252                                   struct btrfs_block_rsv *block_rsv,
5253                                   u64 orig_bytes,
5254                                   enum btrfs_reserve_flush_enum flush)
5255 {
5256         int ret;
5257
5258         ret = __reserve_metadata_bytes(root, block_rsv->space_info, orig_bytes,
5259                                        flush);
5260         if (ret == -ENOSPC &&
5261             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
5262                 struct btrfs_block_rsv *global_rsv =
5263                         &root->fs_info->global_block_rsv;
5264
5265                 if (block_rsv != global_rsv &&
5266                     !block_rsv_use_bytes(global_rsv, orig_bytes))
5267                         ret = 0;
5268         }
5269         if (ret == -ENOSPC)
5270                 trace_btrfs_space_reservation(root->fs_info,
5271                                               "space_info:enospc",
5272                                               block_rsv->space_info->flags,
5273                                               orig_bytes, 1);
5274         return ret;
5275 }
5276
5277 static struct btrfs_block_rsv *get_block_rsv(
5278                                         const struct btrfs_trans_handle *trans,
5279                                         const struct btrfs_root *root)
5280 {
5281         struct btrfs_block_rsv *block_rsv = NULL;
5282
5283         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
5284             (root == root->fs_info->csum_root && trans->adding_csums) ||
5285              (root == root->fs_info->uuid_root))
5286                 block_rsv = trans->block_rsv;
5287
5288         if (!block_rsv)
5289                 block_rsv = root->block_rsv;
5290
5291         if (!block_rsv)
5292                 block_rsv = &root->fs_info->empty_block_rsv;
5293
5294         return block_rsv;
5295 }
5296
5297 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
5298                                u64 num_bytes)
5299 {
5300         int ret = -ENOSPC;
5301         spin_lock(&block_rsv->lock);
5302         if (block_rsv->reserved >= num_bytes) {
5303                 block_rsv->reserved -= num_bytes;
5304                 if (block_rsv->reserved < block_rsv->size)
5305                         block_rsv->full = 0;
5306                 ret = 0;
5307         }
5308         spin_unlock(&block_rsv->lock);
5309         return ret;
5310 }
5311
5312 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
5313                                 u64 num_bytes, int update_size)
5314 {
5315         spin_lock(&block_rsv->lock);
5316         block_rsv->reserved += num_bytes;
5317         if (update_size)
5318                 block_rsv->size += num_bytes;
5319         else if (block_rsv->reserved >= block_rsv->size)
5320                 block_rsv->full = 1;
5321         spin_unlock(&block_rsv->lock);
5322 }
5323
5324 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
5325                              struct btrfs_block_rsv *dest, u64 num_bytes,
5326                              int min_factor)
5327 {
5328         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5329         u64 min_bytes;
5330
5331         if (global_rsv->space_info != dest->space_info)
5332                 return -ENOSPC;
5333
5334         spin_lock(&global_rsv->lock);
5335         min_bytes = div_factor(global_rsv->size, min_factor);
5336         if (global_rsv->reserved < min_bytes + num_bytes) {
5337                 spin_unlock(&global_rsv->lock);
5338                 return -ENOSPC;
5339         }
5340         global_rsv->reserved -= num_bytes;
5341         if (global_rsv->reserved < global_rsv->size)
5342                 global_rsv->full = 0;
5343         spin_unlock(&global_rsv->lock);
5344
5345         block_rsv_add_bytes(dest, num_bytes, 1);
5346         return 0;
5347 }
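
/*
 * Worked example for the check above (numbers are illustrative):
 * div_factor() from math.h computes size * min_factor / 10, so with
 * min_factor == 5 and a 512M global rsv, min_bytes is 256M and the
 * migration only succeeds if giving up num_bytes still leaves at least
 * 256M reserved; otherwise the global rsv is left untouched and we
 * return -ENOSPC.
 */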
5348
5349 /*
5350  * This is for space we already have accounted in space_info->bytes_may_use, so
5351  * basically when we're returning space from block rsvs.
5352  */
5353 static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
5354                                      struct btrfs_space_info *space_info,
5355                                      u64 num_bytes)
5356 {
5357         struct reserve_ticket *ticket;
5358         struct list_head *head;
5359         u64 used;
5360         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;
5361         bool check_overcommit = false;
5362
5363         spin_lock(&space_info->lock);
5364         head = &space_info->priority_tickets;
5365
5366         /*
5367          * If we are over our limit then we need to check and see if we can
5368          * overcommit, and if we can't then we just need to free up our space
5369          * and not satisfy any requests.
5370          */
5371         used = space_info->bytes_used + space_info->bytes_reserved +
5372                 space_info->bytes_pinned + space_info->bytes_readonly +
5373                 space_info->bytes_may_use;
5374         if (used - num_bytes >= space_info->total_bytes)
5375                 check_overcommit = true;
5376 again:
5377         while (!list_empty(head) && num_bytes) {
5378                 ticket = list_first_entry(head, struct reserve_ticket,
5379                                           list);
5380                 /*
5381                  * We use 0 bytes because this space is already reserved, so
5382                  * adding the ticket space would be a double count.
5383                  */
5384                 if (check_overcommit &&
5385                     !can_overcommit(fs_info->extent_root, space_info, 0,
5386                                     flush))
5387                         break;
5388                 if (num_bytes >= ticket->bytes) {
5389                         list_del_init(&ticket->list);
5390                         num_bytes -= ticket->bytes;
5391                         ticket->bytes = 0;
5392                         wake_up(&ticket->wait);
5393                 } else {
5394                         ticket->bytes -= num_bytes;
5395                         num_bytes = 0;
5396                 }
5397         }
5398
5399         if (num_bytes && head == &space_info->priority_tickets) {
5400                 head = &space_info->tickets;
5401                 flush = BTRFS_RESERVE_FLUSH_ALL;
5402                 goto again;
5403         }
5404         space_info->bytes_may_use -= num_bytes;
5405         trace_btrfs_space_reservation(fs_info, "space_info",
5406                                       space_info->flags, num_bytes, 0);
5407         spin_unlock(&space_info->lock);
5408 }
5409
5410 /*
5411  * This is for newly allocated space that isn't accounted in
5412  * space_info->bytes_may_use yet.  So if we allocate a chunk or unpin an extent
5413  * we use this helper.
5414  */
5415 static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
5416                                      struct btrfs_space_info *space_info,
5417                                      u64 num_bytes)
5418 {
5419         struct reserve_ticket *ticket;
5420         struct list_head *head = &space_info->priority_tickets;
5421
5422 again:
5423         while (!list_empty(head) && num_bytes) {
5424                 ticket = list_first_entry(head, struct reserve_ticket,
5425                                           list);
5426                 if (num_bytes >= ticket->bytes) {
5427                         trace_btrfs_space_reservation(fs_info, "space_info",
5428                                                       space_info->flags,
5429                                                       ticket->bytes, 1);
5430                         list_del_init(&ticket->list);
5431                         num_bytes -= ticket->bytes;
5432                         space_info->bytes_may_use += ticket->bytes;
5433                         ticket->bytes = 0;
5434                         wake_up(&ticket->wait);
5435                 } else {
5436                         trace_btrfs_space_reservation(fs_info, "space_info",
5437                                                       space_info->flags,
5438                                                       num_bytes, 1);
5439                         space_info->bytes_may_use += num_bytes;
5440                         ticket->bytes -= num_bytes;
5441                         num_bytes = 0;
5442                 }
5443         }
5444
5445         if (num_bytes && head == &space_info->priority_tickets) {
5446                 head = &space_info->tickets;
5447                 goto again;
5448         }
5449 }
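
/*
 * In both helpers above the priority ticket list is drained before the
 * normal one.  E.g. (illustrative) returning 1M of space with a 768K
 * priority ticket and a 512K normal ticket queued satisfies and wakes
 * the priority waiter first, then leaves the normal ticket 256K short.
 */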
5450
5451 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
5452                                     struct btrfs_block_rsv *block_rsv,
5453                                     struct btrfs_block_rsv *dest, u64 num_bytes)
5454 {
5455         struct btrfs_space_info *space_info = block_rsv->space_info;
5456
5457         spin_lock(&block_rsv->lock);
5458         if (num_bytes == (u64)-1)
5459                 num_bytes = block_rsv->size;
5460         block_rsv->size -= num_bytes;
5461         if (block_rsv->reserved >= block_rsv->size) {
5462                 num_bytes = block_rsv->reserved - block_rsv->size;
5463                 block_rsv->reserved = block_rsv->size;
5464                 block_rsv->full = 1;
5465         } else {
5466                 num_bytes = 0;
5467         }
5468         spin_unlock(&block_rsv->lock);
5469
5470         if (num_bytes > 0) {
5471                 if (dest) {
5472                         spin_lock(&dest->lock);
5473                         if (!dest->full) {
5474                                 u64 bytes_to_add;
5475
5476                                 bytes_to_add = dest->size - dest->reserved;
5477                                 bytes_to_add = min(num_bytes, bytes_to_add);
5478                                 dest->reserved += bytes_to_add;
5479                                 if (dest->reserved >= dest->size)
5480                                         dest->full = 1;
5481                                 num_bytes -= bytes_to_add;
5482                         }
5483                         spin_unlock(&dest->lock);
5484                 }
5485                 if (num_bytes)
5486                         space_info_add_old_bytes(fs_info, space_info,
5487                                                  num_bytes);
5488         }
5489 }
5490
5491 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
5492                             struct btrfs_block_rsv *dst, u64 num_bytes,
5493                             int update_size)
5494 {
5495         int ret;
5496
5497         ret = block_rsv_use_bytes(src, num_bytes);
5498         if (ret)
5499                 return ret;
5500
5501         block_rsv_add_bytes(dst, num_bytes, update_size);
5502         return 0;
5503 }
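
/*
 * Example: btrfs_orphan_reserve_metadata() below migrates one item's
 * worth of space from trans->block_rsv into the root's orphan_block_rsv
 * this way.  With update_size set the destination's target size grows
 * along with its reserved bytes; with it clear the bytes only top the
 * destination up toward its existing size.
 */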
5504
5505 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
5506 {
5507         memset(rsv, 0, sizeof(*rsv));
5508         spin_lock_init(&rsv->lock);
5509         rsv->type = type;
5510 }
5511
5512 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
5513                                               unsigned short type)
5514 {
5515         struct btrfs_block_rsv *block_rsv;
5516         struct btrfs_fs_info *fs_info = root->fs_info;
5517
5518         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
5519         if (!block_rsv)
5520                 return NULL;
5521
5522         btrfs_init_block_rsv(block_rsv, type);
5523         block_rsv->space_info = __find_space_info(fs_info,
5524                                                   BTRFS_BLOCK_GROUP_METADATA);
5525         return block_rsv;
5526 }
5527
5528 void btrfs_free_block_rsv(struct btrfs_root *root,
5529                           struct btrfs_block_rsv *rsv)
5530 {
5531         if (!rsv)
5532                 return;
5533         btrfs_block_rsv_release(root, rsv, (u64)-1);
5534         kfree(rsv);
5535 }
5536
5537 void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
5538 {
5539         kfree(rsv);
5540 }
5541
5542 int btrfs_block_rsv_add(struct btrfs_root *root,
5543                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
5544                         enum btrfs_reserve_flush_enum flush)
5545 {
5546         int ret;
5547
5548         if (num_bytes == 0)
5549                 return 0;
5550
5551         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5552         if (!ret) {
5553                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
5554                 return 0;
5555         }
5556
5557         return ret;
5558 }
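
/*
 * Example lifecycle (a hedged sketch, not code from this file; the
 * BTRFS_BLOCK_RSV_TEMP type and the 1M figure are just for
 * illustration): allocate a temporary rsv, fill it, and let
 * btrfs_free_block_rsv() hand back whatever is left.
 */
static inline int example_rsv_lifecycle(struct btrfs_root *root)
{
	struct btrfs_block_rsv *rsv;
	int ret;

	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv)
		return -ENOMEM;

	ret = btrfs_block_rsv_add(root, rsv, SZ_1M, BTRFS_RESERVE_FLUSH_ALL);
	/* ... consume the reservation ... */
	btrfs_free_block_rsv(root, rsv);	/* releases any remainder */
	return ret;
}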
5559
5560 int btrfs_block_rsv_check(struct btrfs_root *root,
5561                           struct btrfs_block_rsv *block_rsv, int min_factor)
5562 {
5563         u64 num_bytes = 0;
5564         int ret = -ENOSPC;
5565
5566         if (!block_rsv)
5567                 return 0;
5568
5569         spin_lock(&block_rsv->lock);
5570         num_bytes = div_factor(block_rsv->size, min_factor);
5571         if (block_rsv->reserved >= num_bytes)
5572                 ret = 0;
5573         spin_unlock(&block_rsv->lock);
5574
5575         return ret;
5576 }
5577
5578 int btrfs_block_rsv_refill(struct btrfs_root *root,
5579                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
5580                            enum btrfs_reserve_flush_enum flush)
5581 {
5582         u64 num_bytes = 0;
5583         int ret = -ENOSPC;
5584
5585         if (!block_rsv)
5586                 return 0;
5587
5588         spin_lock(&block_rsv->lock);
5589         num_bytes = min_reserved;
5590         if (block_rsv->reserved >= num_bytes)
5591                 ret = 0;
5592         else
5593                 num_bytes -= block_rsv->reserved;
5594         spin_unlock(&block_rsv->lock);
5595
5596         if (!ret)
5597                 return 0;
5598
5599         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5600         if (!ret) {
5601                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
5602                 return 0;
5603         }
5604
5605         return ret;
5606 }
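
/*
 * Worked example (illustrative numbers): with block_rsv->reserved at 6M
 * and min_reserved at 8M, only the 2M shortfall is reserved, and it is
 * added with update_size == 0 so the rsv's target size stays put; a rsv
 * already at or above min_reserved returns 0 without reserving anything.
 */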
5607
5608 void btrfs_block_rsv_release(struct btrfs_root *root,
5609                              struct btrfs_block_rsv *block_rsv,
5610                              u64 num_bytes)
5611 {
5612         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5613         if (global_rsv == block_rsv ||
5614             block_rsv->space_info != global_rsv->space_info)
5615                 global_rsv = NULL;
5616         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
5617                                 num_bytes);
5618 }
5619
5620 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
5621 {
5622         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
5623         struct btrfs_space_info *sinfo = block_rsv->space_info;
5624         u64 num_bytes;
5625
5626         /*
5627          * The global block rsv is based on the size of the extent tree, the
5628          * checksum tree and the root tree.  If the fs is empty we want to set
5629          * it to a minimal amount for safety.
5630          */
5631         num_bytes = btrfs_root_used(&fs_info->extent_root->root_item) +
5632                 btrfs_root_used(&fs_info->csum_root->root_item) +
5633                 btrfs_root_used(&fs_info->tree_root->root_item);
5634         num_bytes = max_t(u64, num_bytes, SZ_16M);
5635
5636         spin_lock(&sinfo->lock);
5637         spin_lock(&block_rsv->lock);
5638
5639         block_rsv->size = min_t(u64, num_bytes, SZ_512M);
5640
5641         if (block_rsv->reserved < block_rsv->size) {
5642                 num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
5643                         sinfo->bytes_reserved + sinfo->bytes_readonly +
5644                         sinfo->bytes_may_use;
5645                 if (sinfo->total_bytes > num_bytes) {
5646                         num_bytes = sinfo->total_bytes - num_bytes;
5647                         num_bytes = min(num_bytes,
5648                                         block_rsv->size - block_rsv->reserved);
5649                         block_rsv->reserved += num_bytes;
5650                         sinfo->bytes_may_use += num_bytes;
5651                         trace_btrfs_space_reservation(fs_info, "space_info",
5652                                                       sinfo->flags, num_bytes,
5653                                                       1);
5654                 }
5655         } else if (block_rsv->reserved > block_rsv->size) {
5656                 num_bytes = block_rsv->reserved - block_rsv->size;
5657                 sinfo->bytes_may_use -= num_bytes;
5658                 trace_btrfs_space_reservation(fs_info, "space_info",
5659                                       sinfo->flags, num_bytes, 0);
5660                 block_rsv->reserved = block_rsv->size;
5661         }
5662
5663         if (block_rsv->reserved == block_rsv->size)
5664                 block_rsv->full = 1;
5665         else
5666                 block_rsv->full = 0;
5667
5668         spin_unlock(&block_rsv->lock);
5669         spin_unlock(&sinfo->lock);
5670 }
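
/*
 * Put differently, the global rsv's target size is the bytes used by
 * the extent, csum and root trees clamped to [16M, 512M], and the
 * reserved bytes are then trued up against the space info under both
 * locks.  E.g. (illustrative) 3G of tree metadata still pins the target
 * at the 512M ceiling.
 */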
5671
5672 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
5673 {
5674         struct btrfs_space_info *space_info;
5675
5676         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
5677         fs_info->chunk_block_rsv.space_info = space_info;
5678
5679         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5680         fs_info->global_block_rsv.space_info = space_info;
5681         fs_info->delalloc_block_rsv.space_info = space_info;
5682         fs_info->trans_block_rsv.space_info = space_info;
5683         fs_info->empty_block_rsv.space_info = space_info;
5684         fs_info->delayed_block_rsv.space_info = space_info;
5685
5686         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
5687         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
5688         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
5689         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
5690         if (fs_info->quota_root)
5691                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
5692         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
5693
5694         update_global_block_rsv(fs_info);
5695 }
5696
5697 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
5698 {
5699         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
5700                                 (u64)-1);
5701         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
5702         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
5703         WARN_ON(fs_info->trans_block_rsv.size > 0);
5704         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
5705         WARN_ON(fs_info->chunk_block_rsv.size > 0);
5706         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
5707         WARN_ON(fs_info->delayed_block_rsv.size > 0);
5708         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
5709 }
5710
5711 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
5712                                   struct btrfs_root *root)
5713 {
5714         if (!trans->block_rsv)
5715                 return;
5716
5717         if (!trans->bytes_reserved)
5718                 return;
5719
5720         trace_btrfs_space_reservation(root->fs_info, "transaction",
5721                                       trans->transid, trans->bytes_reserved, 0);
5722         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
5723         trans->bytes_reserved = 0;
5724 }
5725
5726 /*
5727  * To be called after all the new block groups attached to the transaction
5728  * handle have been created (btrfs_create_pending_block_groups()).
5729  */
5730 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
5731 {
5732         struct btrfs_fs_info *fs_info = trans->root->fs_info;
5733
5734         if (!trans->chunk_bytes_reserved)
5735                 return;
5736
5737         WARN_ON_ONCE(!list_empty(&trans->new_bgs));
5738
5739         block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
5740                                 trans->chunk_bytes_reserved);
5741         trans->chunk_bytes_reserved = 0;
5742 }
5743
5744 /* Can only return 0 or -ENOSPC */
5745 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
5746                                   struct inode *inode)
5747 {
5748         struct btrfs_root *root = BTRFS_I(inode)->root;
5749         /*
5750          * We always use trans->block_rsv here as we will have reserved space
5751          * for our orphan when starting the transaction, using get_block_rsv()
5752          * here will sometimes make us choose the wrong block rsv as we could be
5753          * doing a reloc inode for a non-refcounted root.
5754          */
5755         struct btrfs_block_rsv *src_rsv = trans->block_rsv;
5756         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
5757
5758         /*
5759          * We need to hold space in order to delete our orphan item once we've
5760          * added it, so take the reservation here and release it later, when
5761          * we are truly done with the orphan item.
5762          */
5763         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5764         trace_btrfs_space_reservation(root->fs_info, "orphan",
5765                                       btrfs_ino(inode), num_bytes, 1);
5766         return btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
5767 }
5768
5769 void btrfs_orphan_release_metadata(struct inode *inode)
5770 {
5771         struct btrfs_root *root = BTRFS_I(inode)->root;
5772         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5773         trace_btrfs_space_reservation(root->fs_info, "orphan",
5774                                       btrfs_ino(inode), num_bytes, 0);
5775         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
5776 }
5777
5778 /*
5779  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
5780  * root: the root of the parent directory
5781  * rsv: block reservation
5782  * items: the number of items that we need to reserve space for
5783  * qgroup_reserved: used to return the reserved size in qgroup
5784  *
5785  * This function is used to reserve the space for snapshot/subvolume
5786  * creation and deletion.  Those operations differ from the common
5787  * file/directory operations: they change two fs/file trees
5788  * and the root tree, and the number of items that the qgroup reserves
5789  * differs from the free space reservation, so we cannot use
5790  * the space reservation mechanism in start_transaction().
5791  */
5792 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
5793                                      struct btrfs_block_rsv *rsv,
5794                                      int items,
5795                                      u64 *qgroup_reserved,
5796                                      bool use_global_rsv)
5797 {
5798         u64 num_bytes;
5799         int ret;
5800         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5801
5802         if (root->fs_info->quota_enabled) {
5803                 /* One for parent inode, two for dir entries */
5804                 num_bytes = 3 * root->nodesize;
5805                 ret = btrfs_qgroup_reserve_meta(root, num_bytes);
5806                 if (ret)
5807                         return ret;
5808         } else {
5809                 num_bytes = 0;
5810         }
5811
5812         *qgroup_reserved = num_bytes;
5813
5814         num_bytes = btrfs_calc_trans_metadata_size(root, items);
5815         rsv->space_info = __find_space_info(root->fs_info,
5816                                             BTRFS_BLOCK_GROUP_METADATA);
5817         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
5818                                   BTRFS_RESERVE_FLUSH_ALL);
5819
5820         if (ret == -ENOSPC && use_global_rsv)
5821                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1);
5822
5823         if (ret && *qgroup_reserved)
5824                 btrfs_qgroup_free_meta(root, *qgroup_reserved);
5825
5826         return ret;
5827 }
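
/*
 * Rough numbers (assuming btrfs_calc_trans_metadata_size() is still
 * nodesize * BTRFS_MAX_LEVEL * 2 per item): with a 16K nodesize each
 * item costs 256K, so items == 8 asks for 2M up front, on top of the
 * 3 * nodesize qgroup reservation taken when quotas are enabled.
 */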
5828
5829 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
5830                                       struct btrfs_block_rsv *rsv,
5831                                       u64 qgroup_reserved)
5832 {
5833         btrfs_block_rsv_release(root, rsv, (u64)-1);
5834 }
5835
5836 /**
5837  * drop_outstanding_extent - drop an outstanding extent
5838  * @inode: the inode we're dropping the extent for
5839  * @num_bytes: the number of bytes we're releasing.
5840  *
5841  * This is called when we are freeing up an outstanding extent, either
5842  * after an error or after an extent is written.  This will return the number of
5843  * reserved extents that need to be freed.  This must be called with
5844  * BTRFS_I(inode)->lock held.
5845  */
5846 static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
5847 {
5848         unsigned drop_inode_space = 0;
5849         unsigned dropped_extents = 0;
5850         unsigned num_extents = 0;
5851
5852         num_extents = (unsigned)div64_u64(num_bytes +
5853                                           BTRFS_MAX_EXTENT_SIZE - 1,
5854                                           BTRFS_MAX_EXTENT_SIZE);
5855         ASSERT(num_extents);
5856         ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
5857         BTRFS_I(inode)->outstanding_extents -= num_extents;
5858
5859         if (BTRFS_I(inode)->outstanding_extents == 0 &&
5860             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5861                                &BTRFS_I(inode)->runtime_flags))
5862                 drop_inode_space = 1;
5863
5864         /*
5865          * If we have at least as many outstanding extents as reserved
5866          * extents then we need to leave the reserved extents count alone.
5867          */
5868         if (BTRFS_I(inode)->outstanding_extents >=
5869             BTRFS_I(inode)->reserved_extents)
5870                 return drop_inode_space;
5871
5872         dropped_extents = BTRFS_I(inode)->reserved_extents -
5873                 BTRFS_I(inode)->outstanding_extents;
5874         BTRFS_I(inode)->reserved_extents -= dropped_extents;
5875         return dropped_extents + drop_inode_space;
5876 }
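
/*
 * Worked example: BTRFS_MAX_EXTENT_SIZE is 128M and the rounding-up
 * division above counts how many max-sized extents the range could have
 * been split into, so dropping a 256M + 4K range accounts for
 * DIV_ROUND_UP(256M + 4K, 128M) == 3 outstanding extents.
 */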
5877
5878 /**
5879  * calc_csum_metadata_size - return the amount of metadata space that must be
5880  *      reserved/freed for the given bytes.
5881  * @inode: the inode we're manipulating
5882  * @num_bytes: the number of bytes in question
5883  * @reserve: 1 if we are reserving space, 0 if we are freeing space
5884  *
5885  * This adjusts the number of csum_bytes in the inode and then returns the
5886  * correct amount of metadata that must either be reserved or freed.  We
5887  * calculate how many checksums we can fit into one leaf and then divide the
5888  * number of bytes that will need to be checksummed by this value to figure out
5889  * how many checksums will be required.  If we are adding bytes then the number
5890  * may go up and we will return the number of additional bytes that must be
5891  * reserved.  If it is going down we will return the number of bytes that must
5892  * be freed.
5893  *
5894  * This must be called with BTRFS_I(inode)->lock held.
5895  */
5896 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
5897                                    int reserve)
5898 {
5899         struct btrfs_root *root = BTRFS_I(inode)->root;
5900         u64 old_csums, num_csums;
5901
5902         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
5903             BTRFS_I(inode)->csum_bytes == 0)
5904                 return 0;
5905
5906         old_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5907         if (reserve)
5908                 BTRFS_I(inode)->csum_bytes += num_bytes;
5909         else
5910                 BTRFS_I(inode)->csum_bytes -= num_bytes;
5911         num_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5912
5913         /* No change, no need to reserve more */
5914         if (old_csums == num_csums)
5915                 return 0;
5916
5917         if (reserve)
5918                 return btrfs_calc_trans_metadata_size(root,
5919                                                       num_csums - old_csums);
5920
5921         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
5922 }
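
/*
 * Worked example (leaf counts are illustrative): if the inode's csum
 * bytes currently need 2 leaves and adding num_bytes pushes that to 3,
 * this returns btrfs_calc_trans_metadata_size(root, 1), the cost of one
 * more item; if the leaf count is unchanged the delta is 0 and nothing
 * is reserved or freed.
 */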
5923
5924 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5925 {
5926         struct btrfs_root *root = BTRFS_I(inode)->root;
5927         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
5928         u64 to_reserve = 0;
5929         u64 csum_bytes;
5930         unsigned nr_extents = 0;
5931         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5932         int ret = 0;
5933         bool delalloc_lock = true;
5934         u64 to_free = 0;
5935         unsigned dropped;
5936         bool release_extra = false;
5937
5938         /* If we are a free space inode we must not flush, since we will be in
5939          * the middle of a transaction commit.  We also don't need the delalloc
5940          * mutex since we won't race with anybody.  We need this mostly to make
5941          * lockdep shut its filthy mouth.
5942          *
5943          * If we have a transaction open (can happen if we call truncate_block
5944          * from truncate), then we need FLUSH_LIMIT so we don't deadlock.
5945          */
5946         if (btrfs_is_free_space_inode(inode)) {
5947                 flush = BTRFS_RESERVE_NO_FLUSH;
5948                 delalloc_lock = false;
5949         } else if (current->journal_info) {
5950                 flush = BTRFS_RESERVE_FLUSH_LIMIT;
5951         }
5952
5953         if (flush != BTRFS_RESERVE_NO_FLUSH &&
5954             btrfs_transaction_in_commit(root->fs_info))
5955                 schedule_timeout(1);
5956
5957         if (delalloc_lock)
5958                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5959
5960         num_bytes = ALIGN(num_bytes, root->sectorsize);
5961
5962         spin_lock(&BTRFS_I(inode)->lock);
5963         nr_extents = (unsigned)div64_u64(num_bytes +
5964                                          BTRFS_MAX_EXTENT_SIZE - 1,
5965                                          BTRFS_MAX_EXTENT_SIZE);
5966         BTRFS_I(inode)->outstanding_extents += nr_extents;
5967
5968         nr_extents = 0;
5969         if (BTRFS_I(inode)->outstanding_extents >
5970             BTRFS_I(inode)->reserved_extents)
5971                 nr_extents += BTRFS_I(inode)->outstanding_extents -
5972                         BTRFS_I(inode)->reserved_extents;
5973
5974         /* We always want to reserve a slot for updating the inode. */
5975         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents + 1);
5976         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5977         csum_bytes = BTRFS_I(inode)->csum_bytes;
5978         spin_unlock(&BTRFS_I(inode)->lock);
5979
5980         if (root->fs_info->quota_enabled) {
5981                 ret = btrfs_qgroup_reserve_meta(root,
5982                                 nr_extents * root->nodesize);
5983                 if (ret)
5984                         goto out_fail;
5985         }
5986
5987         ret = btrfs_block_rsv_add(root, block_rsv, to_reserve, flush);
5988         if (unlikely(ret)) {
5989                 btrfs_qgroup_free_meta(root, nr_extents * root->nodesize);
5990                 goto out_fail;
5991         }
5992
5993         spin_lock(&BTRFS_I(inode)->lock);
5994         if (test_and_set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5995                              &BTRFS_I(inode)->runtime_flags)) {
5996                 to_reserve -= btrfs_calc_trans_metadata_size(root, 1);
5997                 release_extra = true;
5998         }
5999         BTRFS_I(inode)->reserved_extents += nr_extents;
6000         spin_unlock(&BTRFS_I(inode)->lock);
6001
6002         if (delalloc_lock)
6003                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
6004
6005         if (to_reserve)
6006                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
6007                                               btrfs_ino(inode), to_reserve, 1);
6008         if (release_extra)
6009                 btrfs_block_rsv_release(root, block_rsv,
6010                                         btrfs_calc_trans_metadata_size(root,
6011                                                                        1));
6012         return 0;
6013
6014 out_fail:
6015         spin_lock(&BTRFS_I(inode)->lock);
6016         dropped = drop_outstanding_extent(inode, num_bytes);
6017         /*
6018          * If the inode's csum_bytes is the same as the original
6019          * csum_bytes then we know we haven't raced with any free()ers
6020          * so we can just reduce our inode's csum bytes and carry on.
6021          */
6022         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
6023                 calc_csum_metadata_size(inode, num_bytes, 0);
6024         } else {
6025                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
6026                 u64 bytes;
6027
6028                 /*
6029                  * This is tricky, but first we need to figure out how much
6030                  * was freed by any free-ers that ran during this
6031                  * reservation, so we reset ->csum_bytes to the csum_bytes
6032                  * before we dropped our lock, and then call the free for the
6033                  * number of bytes that were freed while we were trying our
6034                  * reservation.
6035                  */
6036                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
6037                 BTRFS_I(inode)->csum_bytes = csum_bytes;
6038                 to_free = calc_csum_metadata_size(inode, bytes, 0);
6039
6041                 /*
6042                  * Now we need to see how much we would have freed had we not
6043                  * been making this reservation and our ->csum_bytes were not
6044                  * artificially inflated.
6045                  */
6046                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
6047                 bytes = csum_bytes - orig_csum_bytes;
6048                 bytes = calc_csum_metadata_size(inode, bytes, 0);
6049
6050                 /*
6051                  * Now reset ->csum_bytes to what it should be.  If bytes is
6052                  * more than to_free then we would have freed more space had we
6053                  * not had an artificially high ->csum_bytes, so we need to free
6054                  * the remainder.  If bytes is the same or less then we don't
6055                  * need to do anything, the other free-ers did the correct
6056                  * thing.
6057                  */
6058                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
6059                 if (bytes > to_free)
6060                         to_free = bytes - to_free;
6061                 else
6062                         to_free = 0;
6063         }
6064         spin_unlock(&BTRFS_I(inode)->lock);
6065         if (dropped)
6066                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
6067
6068         if (to_free) {
6069                 btrfs_block_rsv_release(root, block_rsv, to_free);
6070                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
6071                                               btrfs_ino(inode), to_free, 0);
6072         }
6073         if (delalloc_lock)
6074                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
6075         return ret;
6076 }
6077
6078 /**
6079  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
6080  * @inode: the inode to release the reservation for
6081  * @num_bytes: the number of bytes we're releasing
6082  *
6083  * This will release the metadata reservation for an inode.  This can be called
6084  * once we complete IO for a given set of bytes to release their metadata
6085  * reservations.
6086  */
6087 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
6088 {
6089         struct btrfs_root *root = BTRFS_I(inode)->root;
6090         u64 to_free = 0;
6091         unsigned dropped;
6092
6093         num_bytes = ALIGN(num_bytes, root->sectorsize);
6094         spin_lock(&BTRFS_I(inode)->lock);
6095         dropped = drop_outstanding_extent(inode, num_bytes);
6096
6097         if (num_bytes)
6098                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
6099         spin_unlock(&BTRFS_I(inode)->lock);
6100         if (dropped > 0)
6101                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
6102
6103         if (btrfs_test_is_dummy_root(root))
6104                 return;
6105
6106         trace_btrfs_space_reservation(root->fs_info, "delalloc",
6107                                       btrfs_ino(inode), to_free, 0);
6108
6109         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
6110                                 to_free);
6111 }
6112
6113 /**
6114  * btrfs_delalloc_reserve_space - reserve data and metadata space for
6115  * delalloc
6116  * @inode: inode we're writing to
6117  * @start: start of the range we are writing to
6118  * @len: length of the range we are writing to
6119  *
6120  * TODO: This function will finally replace old btrfs_delalloc_reserve_space()
6121  *
6122  * This will do the following things
6123  *
6124  * o reserve space in data space info for num bytes
6125  *   and reserve precious corresponding qgroup space
6126  *   (Done in check_data_free_space)
6127  *
6128  * o reserve metadata space, based on the number of outstanding
6129  *   extents and how many csums will be needed;
6130  *   also reserve metadata space in a per-root over-reserve method.
6131  * o add to the inode's delalloc_bytes
6132  * o add it to the fs_info's delalloc inodes list.
6133  *   (Above 3 all done in delalloc_reserve_metadata)
6134  *
6135  * Return 0 for success
6136  * Return <0 for error (-ENOSPC or -EDQUOT)
6137  */
6138 int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len)
6139 {
6140         int ret;
6141
6142         ret = btrfs_check_data_free_space(inode, start, len);
6143         if (ret < 0)
6144                 return ret;
6145         ret = btrfs_delalloc_reserve_metadata(inode, len);
6146         if (ret < 0)
6147                 btrfs_free_reserved_data_space(inode, start, len);
6148         return ret;
6149 }
6150
6151 /**
6152  * btrfs_delalloc_release_space - release data and metadata space for delalloc
6153  * @inode: inode we're releasing space for
6154  * @start: start position of the space already reserved
6155  * @len: the len of the space already reserved
6156  *
6157  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
6158  * called in the case that we don't need the metadata AND data reservations
6159  * anymore, e.g. when there is an error or when we insert an inline extent.
6160  *
6161  * This function will release the metadata space that was not used and will
6162  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
6163  * list if there are no delalloc bytes left.
6164  * It also handles the qgroup reserved space.
6165  */
6166 void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len)
6167 {
6168         btrfs_delalloc_release_metadata(inode, len);
6169         btrfs_free_reserved_data_space(inode, start, len);
6170 }
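
/*
 * Example caller pattern (a sketch only; example_reserve_then_fail() is
 * hypothetical, and real callers such as the buffered write path also
 * juggle pages and partial failures):
 */
static inline int example_reserve_then_fail(struct inode *inode,
					    u64 start, u64 len)
{
	int ret;

	ret = btrfs_delalloc_reserve_space(inode, start, len);
	if (ret)
		return ret;
	/*
	 * ... try to dirty the range; if that fails before any delalloc
	 * is created, hand both reservations straight back:
	 */
	btrfs_delalloc_release_space(inode, start, len);
	return 0;
}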
6171
6172 static int update_block_group(struct btrfs_trans_handle *trans,
6173                               struct btrfs_root *root, u64 bytenr,
6174                               u64 num_bytes, int alloc)
6175 {
6176         struct btrfs_block_group_cache *cache = NULL;
6177         struct btrfs_fs_info *info = root->fs_info;
6178         u64 total = num_bytes;
6179         u64 old_val;
6180         u64 byte_in_group;
6181         int factor;
6182
6183         /* block accounting for super block */
6184         spin_lock(&info->delalloc_root_lock);
6185         old_val = btrfs_super_bytes_used(info->super_copy);
6186         if (alloc)
6187                 old_val += num_bytes;
6188         else
6189                 old_val -= num_bytes;
6190         btrfs_set_super_bytes_used(info->super_copy, old_val);
6191         spin_unlock(&info->delalloc_root_lock);
6192
6193         while (total) {
6194                 cache = btrfs_lookup_block_group(info, bytenr);
6195                 if (!cache)
6196                         return -ENOENT;
6197                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
6198                                     BTRFS_BLOCK_GROUP_RAID1 |
6199                                     BTRFS_BLOCK_GROUP_RAID10))
6200                         factor = 2;
6201                 else
6202                         factor = 1;
6203                 /*
6204                  * If this block group has free space cache written out, we
6205                  * need to make sure to load it if we are removing space.  This
6206                  * is because we need the unpinning stage to actually add the
6207                  * space back to the block group, otherwise we will leak space.
6208                  */
6209                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
6210                         cache_block_group(cache, 1);
6211
6212                 byte_in_group = bytenr - cache->key.objectid;
6213                 WARN_ON(byte_in_group > cache->key.offset);
6214
6215                 spin_lock(&cache->space_info->lock);
6216                 spin_lock(&cache->lock);
6217
6218                 if (btrfs_test_opt(root, SPACE_CACHE) &&
6219                     cache->disk_cache_state < BTRFS_DC_CLEAR)
6220                         cache->disk_cache_state = BTRFS_DC_CLEAR;
6221
6222                 old_val = btrfs_block_group_used(&cache->item);
6223                 num_bytes = min(total, cache->key.offset - byte_in_group);
6224                 if (alloc) {
6225                         old_val += num_bytes;
6226                         btrfs_set_block_group_used(&cache->item, old_val);
6227                         cache->reserved -= num_bytes;
6228                         cache->space_info->bytes_reserved -= num_bytes;
6229                         cache->space_info->bytes_used += num_bytes;
6230                         cache->space_info->disk_used += num_bytes * factor;
6231                         spin_unlock(&cache->lock);
6232                         spin_unlock(&cache->space_info->lock);
6233                 } else {
6234                         old_val -= num_bytes;
6235                         btrfs_set_block_group_used(&cache->item, old_val);
6236                         cache->pinned += num_bytes;
6237                         cache->space_info->bytes_pinned += num_bytes;
6238                         cache->space_info->bytes_used -= num_bytes;
6239                         cache->space_info->disk_used -= num_bytes * factor;
6240                         spin_unlock(&cache->lock);
6241                         spin_unlock(&cache->space_info->lock);
6242
6243                         trace_btrfs_space_reservation(root->fs_info, "pinned",
6244                                                       cache->space_info->flags,
6245                                                       num_bytes, 1);
6246                         set_extent_dirty(info->pinned_extents,
6247                                          bytenr, bytenr + num_bytes - 1,
6248                                          GFP_NOFS | __GFP_NOFAIL);
6249                 }
6250
6251                 spin_lock(&trans->transaction->dirty_bgs_lock);
6252                 if (list_empty(&cache->dirty_list)) {
6253                         list_add_tail(&cache->dirty_list,
6254                                       &trans->transaction->dirty_bgs);
6255                         trans->transaction->num_dirty_bgs++;
6256                         btrfs_get_block_group(cache);
6257                 }
6258                 spin_unlock(&trans->transaction->dirty_bgs_lock);
6259
6260                 /*
6261                  * No longer have used bytes in this block group, queue it for
6262                  * deletion. We do this after adding the block group to the
6263                  * dirty list to avoid races between cleaner kthread and space
6264                  * cache writeout.
6265                  */
6266                 if (!alloc && old_val == 0) {
6267                         spin_lock(&info->unused_bgs_lock);
6268                         if (list_empty(&cache->bg_list)) {
6269                                 btrfs_get_block_group(cache);
6270                                 list_add_tail(&cache->bg_list,
6271                                               &info->unused_bgs);
6272                         }
6273                         spin_unlock(&info->unused_bgs_lock);
6274                 }
6275
6276                 btrfs_put_block_group(cache);
6277                 total -= num_bytes;
6278                 bytenr += num_bytes;
6279         }
6280         return 0;
6281 }
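
/*
 * A note on factor above: DUP, RAID1 and RAID10 block groups store two
 * copies of every byte, so disk_used moves by 2 * num_bytes while
 * bytes_used moves by num_bytes.  E.g. allocating 1M out of a RAID1
 * data block group bumps bytes_used by 1M and disk_used by 2M.
 */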
6282
6283 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
6284 {
6285         struct btrfs_block_group_cache *cache;
6286         u64 bytenr;
6287
6288         spin_lock(&root->fs_info->block_group_cache_lock);
6289         bytenr = root->fs_info->first_logical_byte;
6290         spin_unlock(&root->fs_info->block_group_cache_lock);
6291
6292         if (bytenr < (u64)-1)
6293                 return bytenr;
6294
6295         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
6296         if (!cache)
6297                 return 0;
6298
6299         bytenr = cache->key.objectid;
6300         btrfs_put_block_group(cache);
6301
6302         return bytenr;
6303 }
6304
6305 static int pin_down_extent(struct btrfs_root *root,
6306                            struct btrfs_block_group_cache *cache,
6307                            u64 bytenr, u64 num_bytes, int reserved)
6308 {
6309         spin_lock(&cache->space_info->lock);
6310         spin_lock(&cache->lock);
6311         cache->pinned += num_bytes;
6312         cache->space_info->bytes_pinned += num_bytes;
6313         if (reserved) {
6314                 cache->reserved -= num_bytes;
6315                 cache->space_info->bytes_reserved -= num_bytes;
6316         }
6317         spin_unlock(&cache->lock);
6318         spin_unlock(&cache->space_info->lock);
6319
6320         trace_btrfs_space_reservation(root->fs_info, "pinned",
6321                                       cache->space_info->flags, num_bytes, 1);
6322         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
6323                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
6324         return 0;
6325 }
6326
6327 /*
6328  * this function must be called within a transaction
6329  */
6330 int btrfs_pin_extent(struct btrfs_root *root,
6331                      u64 bytenr, u64 num_bytes, int reserved)
6332 {
6333         struct btrfs_block_group_cache *cache;
6334
6335         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
6336         BUG_ON(!cache); /* Logic error */
6337
6338         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
6339
6340         btrfs_put_block_group(cache);
6341         return 0;
6342 }
6343
6344 /*
6345  * this function must be called within a transaction
6346  */
6347 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
6348                                     u64 bytenr, u64 num_bytes)
6349 {
6350         struct btrfs_block_group_cache *cache;
6351         int ret;
6352
6353         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
6354         if (!cache)
6355                 return -EINVAL;
6356
6357         /*
6358          * pull in the free space cache (if any) so that our pin
6359          * removes the free space from the cache.  We have load_only set
6360          * to one because the slow code to read in the free extents does check
6361          * the pinned extents.
6362          */
6363         cache_block_group(cache, 1);
6364
6365         pin_down_extent(root, cache, bytenr, num_bytes, 0);
6366
6367         /* remove us from the free space cache (if we're there at all) */
6368         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
6369         btrfs_put_block_group(cache);
6370         return ret;
6371 }
6372
6373 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
6374 {
6375         int ret;
6376         struct btrfs_block_group_cache *block_group;
6377         struct btrfs_caching_control *caching_ctl;
6378
6379         block_group = btrfs_lookup_block_group(root->fs_info, start);
6380         if (!block_group)
6381                 return -EINVAL;
6382
6383         cache_block_group(block_group, 0);
6384         caching_ctl = get_caching_control(block_group);
6385
6386         if (!caching_ctl) {
6387                 /* Logic error */
6388                 BUG_ON(!block_group_cache_done(block_group));
6389                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
6390         } else {
6391                 mutex_lock(&caching_ctl->mutex);
6392
6393                 if (start >= caching_ctl->progress) {
6394                         ret = add_excluded_extent(root, start, num_bytes);
6395                 } else if (start + num_bytes <= caching_ctl->progress) {
6396                         ret = btrfs_remove_free_space(block_group,
6397                                                       start, num_bytes);
6398                 } else {
6399                         num_bytes = caching_ctl->progress - start;
6400                         ret = btrfs_remove_free_space(block_group,
6401                                                       start, num_bytes);
6402                         if (ret)
6403                                 goto out_lock;
6404
6405                         num_bytes = (start + num_bytes) -
6406                                 caching_ctl->progress;
6407                         start = caching_ctl->progress;
6408                         ret = add_excluded_extent(root, start, num_bytes);
6409                 }
6410 out_lock:
6411                 mutex_unlock(&caching_ctl->mutex);
6412                 put_caching_control(caching_ctl);
6413         }
6414         btrfs_put_block_group(block_group);
6415         return ret;
6416 }
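
/*
 * Illustration of the progress split above: if caching has reached
 * offset P and the logged extent spans [start, end) with start < P < end,
 * the [start, P) half has already been added to the free space cache and
 * is removed from it, while the unseen [P, end) half is recorded as an
 * excluded extent instead.
 */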
6417
6418 int btrfs_exclude_logged_extents(struct btrfs_root *log,
6419                                  struct extent_buffer *eb)
6420 {
6421         struct btrfs_file_extent_item *item;
6422         struct btrfs_key key;
6423         int found_type;
6424         int i;
6425
6426         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
6427                 return 0;
6428
6429         for (i = 0; i < btrfs_header_nritems(eb); i++) {
6430                 btrfs_item_key_to_cpu(eb, &key, i);
6431                 if (key.type != BTRFS_EXTENT_DATA_KEY)
6432                         continue;
6433                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
6434                 found_type = btrfs_file_extent_type(eb, item);
6435                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
6436                         continue;
6437                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
6438                         continue;
6439                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
6440                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
6441                 __exclude_logged_extent(log, key.objectid, key.offset);
6442         }
6443
6444         return 0;
6445 }
6446
6447 static void
6448 btrfs_inc_block_group_reservations(struct btrfs_block_group_cache *bg)
6449 {
6450         atomic_inc(&bg->reservations);
6451 }
6452
6453 void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
6454                                         const u64 start)
6455 {
6456         struct btrfs_block_group_cache *bg;
6457
6458         bg = btrfs_lookup_block_group(fs_info, start);
6459         ASSERT(bg);
6460         if (atomic_dec_and_test(&bg->reservations))
6461                 wake_up_atomic_t(&bg->reservations);
6462         btrfs_put_block_group(bg);
6463 }
6464
6465 static int btrfs_wait_bg_reservations_atomic_t(atomic_t *a)
6466 {
6467         schedule();
6468         return 0;
6469 }
6470
6471 void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
6472 {
6473         struct btrfs_space_info *space_info = bg->space_info;
6474
6475         ASSERT(bg->ro);
6476
6477         if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
6478                 return;
6479
6480         /*
6481          * Our block group is read only but before we set it to read only,
6482          * some task might have allocated an extent from it already, but it
6483          * has not yet created a respective ordered extent (and added it to a
6484          * root's list of ordered extents).
6485          * Therefore wait for any task currently allocating extents, since the
6486          * block group's reservations counter is incremented while a read lock
6487          * on the groups' semaphore is held and decremented after releasing
6488          * the read access on that semaphore and creating the ordered extent.
6489          */
6490         down_write(&space_info->groups_sem);
6491         up_write(&space_info->groups_sem);
6492
6493         wait_on_atomic_t(&bg->reservations,
6494                          btrfs_wait_bg_reservations_atomic_t,
6495                          TASK_UNINTERRUPTIBLE);
6496 }
6497
6498 /**
6499  * btrfs_update_reserved_bytes - update the block_group and space info counters
6500  * @cache:      The cache we are manipulating
6501  * @num_bytes:  The number of bytes in question
6502  * @reserve:    One of the reservation enums
6503  * @delalloc:   The blocks are allocated for the delalloc write
6504  *
6505  * This is called by the allocator when it reserves space, or by somebody who is
6506  * freeing space that was never actually used on disk.  For example if you
6507  * reserve some space for a new leaf in transaction A and before transaction A
6508  * commits you free that leaf, you call this with reserve set to 0 in order to
6509  * clear the reservation.
6510  *
6511  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
6512  * ENOSPC accounting.  For data we handle the reservation through clearing the
6513  * delalloc bits in the io_tree.  We have to do this since we could end up
6514  * allocating less disk space for the amount of data we have reserved in the
6515  * case of compression.
6516  *
6517  * If this is a reservation and the block group has become read only we cannot
6518  * make the reservation and return -EAGAIN, otherwise this function always
6519  * succeeds.
6520  */
6521 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
6522                                        u64 num_bytes, int reserve, int delalloc)
6523 {
6524         struct btrfs_space_info *space_info = cache->space_info;
6525         int ret = 0;
6526
6527         spin_lock(&space_info->lock);
6528         spin_lock(&cache->lock);
6529         if (reserve != RESERVE_FREE) {
6530                 if (cache->ro) {
6531                         ret = -EAGAIN;
6532                 } else {
6533                         cache->reserved += num_bytes;
6534                         space_info->bytes_reserved += num_bytes;
6535                         if (reserve == RESERVE_ALLOC) {
6536                                 trace_btrfs_space_reservation(cache->fs_info,
6537                                                 "space_info", space_info->flags,
6538                                                 num_bytes, 0);
6539                                 space_info->bytes_may_use -= num_bytes;
6540                         }
6541
6542                         if (delalloc)
6543                                 cache->delalloc_bytes += num_bytes;
6544                 }
6545         } else {
6546                 if (cache->ro)
6547                         space_info->bytes_readonly += num_bytes;
6548                 cache->reserved -= num_bytes;
6549                 space_info->bytes_reserved -= num_bytes;
6550
6551                 if (delalloc)
6552                         cache->delalloc_bytes -= num_bytes;
6553         }
6554         spin_unlock(&cache->lock);
6555         spin_unlock(&space_info->lock);
6556         return ret;
6557 }
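
/*
 * Worked example (illustrative): allocating a 16K metadata block with
 * RESERVE_ALLOC moves 16K from space_info->bytes_may_use into
 * bytes_reserved; freeing it again before it ever hits disk calls this
 * with RESERVE_FREE, dropping bytes_reserved by 16K (and also bumping
 * bytes_readonly if the group went read-only in between).
 */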
6558
6559 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
6560                                 struct btrfs_root *root)
6561 {
6562         struct btrfs_fs_info *fs_info = root->fs_info;
6563         struct btrfs_caching_control *next;
6564         struct btrfs_caching_control *caching_ctl;
6565         struct btrfs_block_group_cache *cache;
6566
6567         down_write(&fs_info->commit_root_sem);
6568
6569         list_for_each_entry_safe(caching_ctl, next,
6570                                  &fs_info->caching_block_groups, list) {
6571                 cache = caching_ctl->block_group;
6572                 if (block_group_cache_done(cache)) {
6573                         cache->last_byte_to_unpin = (u64)-1;
6574                         list_del_init(&caching_ctl->list);
6575                         put_caching_control(caching_ctl);
6576                 } else {
6577                         cache->last_byte_to_unpin = caching_ctl->progress;
6578                 }
6579         }
6580
6581         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6582                 fs_info->pinned_extents = &fs_info->freed_extents[1];
6583         else
6584                 fs_info->pinned_extents = &fs_info->freed_extents[0];
6585
6586         up_write(&fs_info->commit_root_sem);
6587
6588         update_global_block_rsv(fs_info);
6589 }
6590
6591 /*
6592  * Returns the free cluster for the given space info and sets empty_cluster to
6593  * what it should be based on the mount options.
6594  */
6595 static struct btrfs_free_cluster *
6596 fetch_cluster_info(struct btrfs_root *root, struct btrfs_space_info *space_info,
6597                    u64 *empty_cluster)
6598 {
6599         struct btrfs_free_cluster *ret = NULL;
6600         bool ssd = btrfs_test_opt(root, SSD);
6601
6602         *empty_cluster = 0;
6603         if (btrfs_mixed_space_info(space_info))
6604                 return ret;
6605
6606         if (ssd)
6607                 *empty_cluster = SZ_2M;
6608         if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
6609                 ret = &root->fs_info->meta_alloc_cluster;
6610                 if (!ssd)
6611                         *empty_cluster = SZ_64K;
6612         } else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && ssd) {
6613                 ret = &root->fs_info->data_alloc_cluster;
6614         }
6615
6616         return ret;
6617 }
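
/*
 * Example of the above: with -o ssd, a metadata space_info gets
 * &fs_info->meta_alloc_cluster with *empty_cluster = SZ_2M; without
 * ssd it still gets the cluster, but *empty_cluster drops to SZ_64K.
 * Data space_infos only cluster on ssd, and mixed block groups never
 * cluster at all.
 */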
6618
6619 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
6620                               const bool return_free_space)
6621 {
6622         struct btrfs_fs_info *fs_info = root->fs_info;
6623         struct btrfs_block_group_cache *cache = NULL;
6624         struct btrfs_space_info *space_info;
6625         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
6626         struct btrfs_free_cluster *cluster = NULL;
6627         u64 len;
6628         u64 total_unpinned = 0;
6629         u64 empty_cluster = 0;
6630         bool readonly;
6631
6632         while (start <= end) {
6633                 readonly = false;
6634                 if (!cache ||
6635                     start >= cache->key.objectid + cache->key.offset) {
6636                         if (cache)
6637                                 btrfs_put_block_group(cache);
6638                         total_unpinned = 0;
6639                         cache = btrfs_lookup_block_group(fs_info, start);
6640                         BUG_ON(!cache); /* Logic error */
6641
6642                         cluster = fetch_cluster_info(root,
6643                                                      cache->space_info,
6644                                                      &empty_cluster);
6645                         empty_cluster <<= 1;
6646                 }
6647
6648                 len = cache->key.objectid + cache->key.offset - start;
6649                 len = min(len, end + 1 - start);
6650
6651                 if (start < cache->last_byte_to_unpin) {
6652                         len = min(len, cache->last_byte_to_unpin - start);
6653                         if (return_free_space)
6654                                 btrfs_add_free_space(cache, start, len);
6655                 }
6656
6657                 start += len;
6658                 total_unpinned += len;
6659                 space_info = cache->space_info;
6660
6661                 /*
6662                  * If this space cluster has been marked as fragmented and we've
6663                  * unpinned enough in this block group to potentially allow a
6664                  * cluster to be created inside of it, go ahead and clear the
6665                  * fragmented flag.
6666                  */
6667                 if (cluster && cluster->fragmented &&
6668                     total_unpinned > empty_cluster) {
6669                         spin_lock(&cluster->lock);
6670                         cluster->fragmented = 0;
6671                         spin_unlock(&cluster->lock);
6672                 }
6673
6674                 spin_lock(&space_info->lock);
6675                 spin_lock(&cache->lock);
6676                 cache->pinned -= len;
6677                 space_info->bytes_pinned -= len;
6678
6679                 trace_btrfs_space_reservation(fs_info, "pinned",
6680                                               space_info->flags, len, 0);
6681                 space_info->max_extent_size = 0;
6682                 percpu_counter_add(&space_info->total_bytes_pinned, -len);
6683                 if (cache->ro) {
6684                         space_info->bytes_readonly += len;
6685                         readonly = true;
6686                 }
6687                 spin_unlock(&cache->lock);
6688                 if (!readonly && return_free_space &&
6689                     global_rsv->space_info == space_info) {
6690                         u64 to_add = len;
6691                         WARN_ON(!return_free_space);
6692                         spin_lock(&global_rsv->lock);
6693                         if (!global_rsv->full) {
6694                                 to_add = min(len, global_rsv->size -
6695                                              global_rsv->reserved);
6696                                 global_rsv->reserved += to_add;
6697                                 space_info->bytes_may_use += to_add;
6698                                 if (global_rsv->reserved >= global_rsv->size)
6699                                         global_rsv->full = 1;
6700                                 trace_btrfs_space_reservation(fs_info,
6701                                                               "space_info",
6702                                                               space_info->flags,
6703                                                               to_add, 1);
6704                                 len -= to_add;
6705                         }
6706                         spin_unlock(&global_rsv->lock);
6707                         /* Add to any tickets we may have */
6708                         if (len)
6709                                 space_info_add_new_bytes(fs_info, space_info,
6710                                                          len);
6711                 }
6712                 spin_unlock(&space_info->lock);
6713         }
6714
6715         if (cache)
6716                 btrfs_put_block_group(cache);
6717         return 0;
6718 }
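
/*
 * A worked example of the global reserve refill above, with made up
 * numbers: if len is 1M, global_rsv->size is 16M and ->reserved is
 * 15.5M, then to_add = min(1M, 512K) = 512K, the reserve becomes
 * full, bytes_may_use grows by 512K, and the remaining 512K of len
 * is offered to any waiting tickets via space_info_add_new_bytes().
 */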
6719
6720 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
6721                                struct btrfs_root *root)
6722 {
6723         struct btrfs_fs_info *fs_info = root->fs_info;
6724         struct btrfs_block_group_cache *block_group, *tmp;
6725         struct list_head *deleted_bgs;
6726         struct extent_io_tree *unpin;
6727         u64 start;
6728         u64 end;
6729         int ret;
6730
6731         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6732                 unpin = &fs_info->freed_extents[1];
6733         else
6734                 unpin = &fs_info->freed_extents[0];
6735
6736         while (!trans->aborted) {
6737                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
6738                 ret = find_first_extent_bit(unpin, 0, &start, &end,
6739                                             EXTENT_DIRTY, NULL);
6740                 if (ret) {
6741                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6742                         break;
6743                 }
6744
6745                 if (btrfs_test_opt(root, DISCARD))
6746                         ret = btrfs_discard_extent(root, start,
6747                                                    end + 1 - start, NULL);
6748
6749                 clear_extent_dirty(unpin, start, end);
6750                 unpin_extent_range(root, start, end, true);
6751                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6752                 cond_resched();
6753         }
6754
6755         /*
6756          * Transaction is finished.  We don't need the lock anymore.  We
6757          * do need to clean up the block groups in case of a transaction
6758          * abort.
6759          */
6760         deleted_bgs = &trans->transaction->deleted_bgs;
6761         list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
6762                 u64 trimmed = 0;
6763
6764                 ret = -EROFS;
6765                 if (!trans->aborted)
6766                         ret = btrfs_discard_extent(root,
6767                                                    block_group->key.objectid,
6768                                                    block_group->key.offset,
6769                                                    &trimmed);
6770
6771                 list_del_init(&block_group->bg_list);
6772                 btrfs_put_block_group_trimming(block_group);
6773                 btrfs_put_block_group(block_group);
6774
6775                 if (ret) {
6776                         const char *errstr = btrfs_decode_error(ret);
6777                         btrfs_warn(fs_info,
6778                                    "discard failed while removing block group: errno=%d %s",
6779                                    ret, errstr);
6780                 }
6781         }
6782
6783         return 0;
6784 }
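
/*
 * Note that for the deleted_bgs loop above a discard is only attempted
 * when the transaction did not abort; on abort each block group is
 * still unlinked and released, but the warning fires with -EROFS.
 */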
6785
6786 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
6787                              u64 owner, u64 root_objectid)
6788 {
6789         struct btrfs_space_info *space_info;
6790         u64 flags;
6791
6792         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6793                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
6794                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
6795                 else
6796                         flags = BTRFS_BLOCK_GROUP_METADATA;
6797         } else {
6798                 flags = BTRFS_BLOCK_GROUP_DATA;
6799         }
6800
6801         space_info = __find_space_info(fs_info, flags);
6802         BUG_ON(!space_info); /* Logic bug */
6803         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
6804 }
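
/*
 * For example, freeing a chunk tree node (owner == level, which is
 * below BTRFS_FIRST_FREE_OBJECTID, and root_objectid ==
 * BTRFS_CHUNK_TREE_OBJECTID) counts against the SYSTEM space_info,
 * any other tree block counts against METADATA, and file extents
 * (owner >= BTRFS_FIRST_FREE_OBJECTID) count against DATA.
 */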
6805
6806
6807 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
6808                                 struct btrfs_root *root,
6809                                 struct btrfs_delayed_ref_node *node, u64 parent,
6810                                 u64 root_objectid, u64 owner_objectid,
6811                                 u64 owner_offset, int refs_to_drop,
6812                                 struct btrfs_delayed_extent_op *extent_op)
6813 {
6814         struct btrfs_key key;
6815         struct btrfs_path *path;
6816         struct btrfs_fs_info *info = root->fs_info;
6817         struct btrfs_root *extent_root = info->extent_root;
6818         struct extent_buffer *leaf;
6819         struct btrfs_extent_item *ei;
6820         struct btrfs_extent_inline_ref *iref;
6821         int ret;
6822         int is_data;
6823         int extent_slot = 0;
6824         int found_extent = 0;
6825         int num_to_del = 1;
6826         u32 item_size;
6827         u64 refs;
6828         u64 bytenr = node->bytenr;
6829         u64 num_bytes = node->num_bytes;
6830         int last_ref = 0;
6831         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6832                                                  SKINNY_METADATA);
6833
6834         path = btrfs_alloc_path();
6835         if (!path)
6836                 return -ENOMEM;
6837
6838         path->reada = READA_FORWARD;
6839         path->leave_spinning = 1;
6840
6841         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
6842         BUG_ON(!is_data && refs_to_drop != 1);
6843
6844         if (is_data)
6845                 skinny_metadata = 0;
6846
6847         ret = lookup_extent_backref(trans, extent_root, path, &iref,
6848                                     bytenr, num_bytes, parent,
6849                                     root_objectid, owner_objectid,
6850                                     owner_offset);
6851         if (ret == 0) {
6852                 extent_slot = path->slots[0];
6853                 while (extent_slot >= 0) {
6854                         btrfs_item_key_to_cpu(path->nodes[0], &key,
6855                                               extent_slot);
6856                         if (key.objectid != bytenr)
6857                                 break;
6858                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
6859                             key.offset == num_bytes) {
6860                                 found_extent = 1;
6861                                 break;
6862                         }
6863                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
6864                             key.offset == owner_objectid) {
6865                                 found_extent = 1;
6866                                 break;
6867                         }
6868                         if (path->slots[0] - extent_slot > 5)
6869                                 break;
6870                         extent_slot--;
6871                 }
6872 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6873                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
6874                 if (found_extent && item_size < sizeof(*ei))
6875                         found_extent = 0;
6876 #endif
6877                 if (!found_extent) {
6878                         BUG_ON(iref);
6879                         ret = remove_extent_backref(trans, extent_root, path,
6880                                                     NULL, refs_to_drop,
6881                                                     is_data, &last_ref);
6882                         if (ret) {
6883                                 btrfs_abort_transaction(trans, extent_root, ret);
6884                                 goto out;
6885                         }
6886                         btrfs_release_path(path);
6887                         path->leave_spinning = 1;
6888
6889                         key.objectid = bytenr;
6890                         key.type = BTRFS_EXTENT_ITEM_KEY;
6891                         key.offset = num_bytes;
6892
6893                         if (!is_data && skinny_metadata) {
6894                                 key.type = BTRFS_METADATA_ITEM_KEY;
6895                                 key.offset = owner_objectid;
6896                         }
6897
6898                         ret = btrfs_search_slot(trans, extent_root,
6899                                                 &key, path, -1, 1);
6900                         if (ret > 0 && skinny_metadata && path->slots[0]) {
6901                                 /*
6902                                  * Couldn't find our skinny metadata item,
6903                                  * see if we have ye olde extent item.
6904                                  */
6905                                 path->slots[0]--;
6906                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
6907                                                       path->slots[0]);
6908                                 if (key.objectid == bytenr &&
6909                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
6910                                     key.offset == num_bytes)
6911                                         ret = 0;
6912                         }
6913
6914                         if (ret > 0 && skinny_metadata) {
6915                                 skinny_metadata = false;
6916                                 key.objectid = bytenr;
6917                                 key.type = BTRFS_EXTENT_ITEM_KEY;
6918                                 key.offset = num_bytes;
6919                                 btrfs_release_path(path);
6920                                 ret = btrfs_search_slot(trans, extent_root,
6921                                                         &key, path, -1, 1);
6922                         }
6923
6924                         if (ret) {
6925                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6926                                         ret, bytenr);
6927                                 if (ret > 0)
6928                                         btrfs_print_leaf(extent_root,
6929                                                          path->nodes[0]);
6930                         }
6931                         if (ret < 0) {
6932                                 btrfs_abort_transaction(trans, extent_root, ret);
6933                                 goto out;
6934                         }
6935                         extent_slot = path->slots[0];
6936                 }
6937         } else if (WARN_ON(ret == -ENOENT)) {
6938                 btrfs_print_leaf(extent_root, path->nodes[0]);
6939                 btrfs_err(info,
6940                         "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
6941                         bytenr, parent, root_objectid, owner_objectid,
6942                         owner_offset);
6943                 btrfs_abort_transaction(trans, extent_root, ret);
6944                 goto out;
6945         } else {
6946                 btrfs_abort_transaction(trans, extent_root, ret);
6947                 goto out;
6948         }
6949
6950         leaf = path->nodes[0];
6951         item_size = btrfs_item_size_nr(leaf, extent_slot);
6952 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6953         if (item_size < sizeof(*ei)) {
6954                 BUG_ON(found_extent || extent_slot != path->slots[0]);
6955                 ret = convert_extent_item_v0(trans, extent_root, path,
6956                                              owner_objectid, 0);
6957                 if (ret < 0) {
6958                         btrfs_abort_transaction(trans, extent_root, ret);
6959                         goto out;
6960                 }
6961
6962                 btrfs_release_path(path);
6963                 path->leave_spinning = 1;
6964
6965                 key.objectid = bytenr;
6966                 key.type = BTRFS_EXTENT_ITEM_KEY;
6967                 key.offset = num_bytes;
6968
6969                 ret = btrfs_search_slot(trans, extent_root, &key, path,
6970                                         -1, 1);
6971                 if (ret) {
6972                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6973                                 ret, bytenr);
6974                         btrfs_print_leaf(extent_root, path->nodes[0]);
6975                 }
6976                 if (ret < 0) {
6977                         btrfs_abort_transaction(trans, extent_root, ret);
6978                         goto out;
6979                 }
6980
6981                 extent_slot = path->slots[0];
6982                 leaf = path->nodes[0];
6983                 item_size = btrfs_item_size_nr(leaf, extent_slot);
6984         }
6985 #endif
6986         BUG_ON(item_size < sizeof(*ei));
6987         ei = btrfs_item_ptr(leaf, extent_slot,
6988                             struct btrfs_extent_item);
6989         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
6990             key.type == BTRFS_EXTENT_ITEM_KEY) {
6991                 struct btrfs_tree_block_info *bi;
6992                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
6993                 bi = (struct btrfs_tree_block_info *)(ei + 1);
6994                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
6995         }
6996
6997         refs = btrfs_extent_refs(leaf, ei);
6998         if (refs < refs_to_drop) {
6999                 btrfs_err(info, "trying to drop %d refs but we only have %llu for bytenr %llu",
7000                           refs_to_drop, refs, bytenr);
7001                 ret = -EINVAL;
7002                 btrfs_abort_transaction(trans, extent_root, ret);
7003                 goto out;
7004         }
7005         refs -= refs_to_drop;
7006
7007         if (refs > 0) {
7008                 if (extent_op)
7009                         __run_delayed_extent_op(extent_op, leaf, ei);
7010                 /*
7011                  * In the case of an inline back ref, the reference count
7012                  * will be updated by remove_extent_backref()
7013                  */
7014                 if (iref) {
7015                         BUG_ON(!found_extent);
7016                 } else {
7017                         btrfs_set_extent_refs(leaf, ei, refs);
7018                         btrfs_mark_buffer_dirty(leaf);
7019                 }
7020                 if (found_extent) {
7021                         ret = remove_extent_backref(trans, extent_root, path,
7022                                                     iref, refs_to_drop,
7023                                                     is_data, &last_ref);
7024                         if (ret) {
7025                                 btrfs_abort_transaction(trans, extent_root, ret);
7026                                 goto out;
7027                         }
7028                 }
7029                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
7030                                  root_objectid);
7031         } else {
7032                 if (found_extent) {
7033                         BUG_ON(is_data && refs_to_drop !=
7034                                extent_data_ref_count(path, iref));
7035                         if (iref) {
7036                                 BUG_ON(path->slots[0] != extent_slot);
7037                         } else {
7038                                 BUG_ON(path->slots[0] != extent_slot + 1);
7039                                 path->slots[0] = extent_slot;
7040                                 num_to_del = 2;
7041                         }
7042                 }
7043
7044                 last_ref = 1;
7045                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
7046                                       num_to_del);
7047                 if (ret) {
7048                         btrfs_abort_transaction(trans, extent_root, ret);
7049                         goto out;
7050                 }
7051                 btrfs_release_path(path);
7052
7053                 if (is_data) {
7054                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
7055                         if (ret) {
7056                                 btrfs_abort_transaction(trans, extent_root, ret);
7057                                 goto out;
7058                         }
7059                 }
7060
7061                 ret = add_to_free_space_tree(trans, root->fs_info, bytenr,
7062                                              num_bytes);
7063                 if (ret) {
7064                         btrfs_abort_transaction(trans, extent_root, ret);
7065                         goto out;
7066                 }
7067
7068                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
7069                 if (ret) {
7070                         btrfs_abort_transaction(trans, extent_root, ret);
7071                         goto out;
7072                 }
7073         }
7074         btrfs_release_path(path);
7075
7076 out:
7077         btrfs_free_path(path);
7078         return ret;
7079 }
7080
7081 /*
7082  * when we free a block, it is possible (and likely) that we free the last
7083  * delayed ref for that extent as well.  This searches the delayed ref tree for
7084  * a given extent, and if there are no other delayed refs to be processed, it
7085  * removes it from the tree.
7086  */
7087 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
7088                                       struct btrfs_root *root, u64 bytenr)
7089 {
7090         struct btrfs_delayed_ref_head *head;
7091         struct btrfs_delayed_ref_root *delayed_refs;
7092         int ret = 0;
7093
7094         delayed_refs = &trans->transaction->delayed_refs;
7095         spin_lock(&delayed_refs->lock);
7096         head = btrfs_find_delayed_ref_head(trans, bytenr);
7097         if (!head)
7098                 goto out_delayed_unlock;
7099
7100         spin_lock(&head->lock);
7101         if (!list_empty(&head->ref_list))
7102                 goto out;
7103
7104         if (head->extent_op) {
7105                 if (!head->must_insert_reserved)
7106                         goto out;
7107                 btrfs_free_delayed_extent_op(head->extent_op);
7108                 head->extent_op = NULL;
7109         }
7110
7111         /*
7112          * waiting for the lock here would deadlock.  If someone else has it
7113          * locked, they are already in the process of dropping it anyway
7114          */
7115         if (!mutex_trylock(&head->mutex))
7116                 goto out;
7117
7118         /*
7119          * at this point we have a head with no other entries.  Go
7120          * ahead and process it.
7121          */
7122         head->node.in_tree = 0;
7123         rb_erase(&head->href_node, &delayed_refs->href_root);
7124
7125         atomic_dec(&delayed_refs->num_entries);
7126
7127         /*
7128          * we don't take a ref on the node because we're removing it from the
7129          * tree, so we just steal the ref the tree was holding.
7130          */
7131         delayed_refs->num_heads--;
7132         if (head->processing == 0)
7133                 delayed_refs->num_heads_ready--;
7134         head->processing = 0;
7135         spin_unlock(&head->lock);
7136         spin_unlock(&delayed_refs->lock);
7137
7138         BUG_ON(head->extent_op);
7139         if (head->must_insert_reserved)
7140                 ret = 1;
7141
7142         mutex_unlock(&head->mutex);
7143         btrfs_put_delayed_ref(&head->node);
7144         return ret;
7145 out:
7146         spin_unlock(&head->lock);
7147
7148 out_delayed_unlock:
7149         spin_unlock(&delayed_refs->lock);
7150         return 0;
7151 }
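
/*
 * Return convention for check_ref_cleanup(): 1 means the head was the
 * last pending ref and carried must_insert_reserved, i.e. the extent
 * item was never inserted into the extent tree, so the caller may hand
 * the space straight back to the free space cache; 0 means normal
 * delayed ref processing still has to run for this bytenr.
 */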
7152
7153 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
7154                            struct btrfs_root *root,
7155                            struct extent_buffer *buf,
7156                            u64 parent, int last_ref)
7157 {
7158         int pin = 1;
7159         int ret;
7160
7161         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
7162                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7163                                         buf->start, buf->len,
7164                                         parent, root->root_key.objectid,
7165                                         btrfs_header_level(buf),
7166                                         BTRFS_DROP_DELAYED_REF, NULL);
7167                 BUG_ON(ret); /* -ENOMEM */
7168         }
7169
7170         if (!last_ref)
7171                 return;
7172
7173         if (btrfs_header_generation(buf) == trans->transid) {
7174                 struct btrfs_block_group_cache *cache;
7175
7176                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
7177                         ret = check_ref_cleanup(trans, root, buf->start);
7178                         if (!ret)
7179                                 goto out;
7180                 }
7181
7182                 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
7183
7184                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
7185                         pin_down_extent(root, cache, buf->start, buf->len, 1);
7186                         btrfs_put_block_group(cache);
7187                         goto out;
7188                 }
7189
7190                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
7191
7192                 btrfs_add_free_space(cache, buf->start, buf->len);
7193                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
7194                 btrfs_put_block_group(cache);
7195                 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
7196                 pin = 0;
7197         }
7198 out:
7199         if (pin)
7200                 add_pinned_bytes(root->fs_info, buf->len,
7201                                  btrfs_header_level(buf),
7202                                  root->root_key.objectid);
7203
7204         /*
7205          * Deleting the buffer, clear the corrupt flag since it doesn't matter
7206          * anymore.
7207          */
7208         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
7209 }
7210
7211 /* Can return -ENOMEM */
7212 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
7213                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
7214                       u64 owner, u64 offset)
7215 {
7216         int ret;
7217         struct btrfs_fs_info *fs_info = root->fs_info;
7218
7219         if (btrfs_test_is_dummy_root(root))
7220                 return 0;
7221
7222         add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
7223
7224         /*
7225          * tree log blocks never actually go into the extent allocation
7226          * tree, just update pinning info and exit early.
7227          */
7228         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
7229                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
7230                 /* unlocks the pinned mutex */
7231                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
7232                 ret = 0;
7233         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
7234                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
7235                                         num_bytes,
7236                                         parent, root_objectid, (int)owner,
7237                                         BTRFS_DROP_DELAYED_REF, NULL);
7238         } else {
7239                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
7240                                                 num_bytes,
7241                                                 parent, root_objectid, owner,
7242                                                 offset, 0,
7243                                                 BTRFS_DROP_DELAYED_REF, NULL);
7244         }
7245         return ret;
7246 }
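
/*
 * Example dispatch for the above: freeing a file data extent (owner >=
 * BTRFS_FIRST_FREE_OBJECTID) queues a delayed data ref, freeing an
 * ordinary tree block queues a delayed tree ref, and a tree log block
 * is pinned right away since log blocks never get extent tree entries.
 */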
7247
7248 /*
7249  * when we wait for progress in the block group caching, it's because
7250  * our allocation attempt failed at least once.  So, we must sleep
7251  * and let some progress happen before we try again.
7252  *
7253  * This function will sleep at least once waiting for new free space to
7254  * show up, and then it will check the block group free space numbers
7255  * for our min num_bytes.  Another option is to have it go ahead
7256  * and look in the rbtree for a free extent of a given size, but this
7257  * is a good start.
7258  *
7259  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
7260  * any of the information in this block group.
7261  */
7262 static noinline void
7263 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
7264                                 u64 num_bytes)
7265 {
7266         struct btrfs_caching_control *caching_ctl;
7267
7268         caching_ctl = get_caching_control(cache);
7269         if (!caching_ctl)
7270                 return;
7271
7272         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
7273                    (cache->free_space_ctl->free_space >= num_bytes));
7274
7275         put_caching_control(caching_ctl);
7276 }
7277
7278 static noinline int
7279 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
7280 {
7281         struct btrfs_caching_control *caching_ctl;
7282         int ret = 0;
7283
7284         caching_ctl = get_caching_control(cache);
7285         if (!caching_ctl)
7286                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
7287
7288         wait_event(caching_ctl->wait, block_group_cache_done(cache));
7289         if (cache->cached == BTRFS_CACHE_ERROR)
7290                 ret = -EIO;
7291         put_caching_control(caching_ctl);
7292         return ret;
7293 }
7294
7295 int __get_raid_index(u64 flags)
7296 {
7297         if (flags & BTRFS_BLOCK_GROUP_RAID10)
7298                 return BTRFS_RAID_RAID10;
7299         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
7300                 return BTRFS_RAID_RAID1;
7301         else if (flags & BTRFS_BLOCK_GROUP_DUP)
7302                 return BTRFS_RAID_DUP;
7303         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
7304                 return BTRFS_RAID_RAID0;
7305         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
7306                 return BTRFS_RAID_RAID5;
7307         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
7308                 return BTRFS_RAID_RAID6;
7309
7310         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
7311 }
7312
7313 int get_block_group_index(struct btrfs_block_group_cache *cache)
7314 {
7315         return __get_raid_index(cache->flags);
7316 }
7317
7318 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
7319         [BTRFS_RAID_RAID10]     = "raid10",
7320         [BTRFS_RAID_RAID1]      = "raid1",
7321         [BTRFS_RAID_DUP]        = "dup",
7322         [BTRFS_RAID_RAID0]      = "raid0",
7323         [BTRFS_RAID_SINGLE]     = "single",
7324         [BTRFS_RAID_RAID5]      = "raid5",
7325         [BTRFS_RAID_RAID6]      = "raid6",
7326 };
7327
7328 static const char *get_raid_name(enum btrfs_raid_types type)
7329 {
7330         if (type >= BTRFS_NR_RAID_TYPES)
7331                 return NULL;
7332
7333         return btrfs_raid_type_names[type];
7334 }
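
/*
 * Example: a raid1 data block group has BTRFS_BLOCK_GROUP_RAID1 set in
 * its flags, so __get_raid_index() returns BTRFS_RAID_RAID1 and
 * get_raid_name() maps that back to "raid1".  A block group with no
 * profile bits at all is treated as "single".
 */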
7335
7336 enum btrfs_loop_type {
7337         LOOP_CACHING_NOWAIT = 0,
7338         LOOP_CACHING_WAIT = 1,
7339         LOOP_ALLOC_CHUNK = 2,
7340         LOOP_NO_EMPTY_SIZE = 3,
7341 };
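
/*
 * find_free_extent() escalates through these states in order when an
 * allocation attempt comes up empty; see the summary comment above the
 * retry logic near the end of that function.
 */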
7342
7343 static inline void
7344 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
7345                        int delalloc)
7346 {
7347         if (delalloc)
7348                 down_read(&cache->data_rwsem);
7349 }
7350
7351 static inline void
7352 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
7353                        int delalloc)
7354 {
7355         btrfs_get_block_group(cache);
7356         if (delalloc)
7357                 down_read(&cache->data_rwsem);
7358 }
7359
7360 static struct btrfs_block_group_cache *
7361 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
7362                    struct btrfs_free_cluster *cluster,
7363                    int delalloc)
7364 {
7365         struct btrfs_block_group_cache *used_bg = NULL;
7366
7367         spin_lock(&cluster->refill_lock);
7368         while (1) {
7369                 used_bg = cluster->block_group;
7370                 if (!used_bg)
7371                         return NULL;
7372
7373                 if (used_bg == block_group)
7374                         return used_bg;
7375
7376                 btrfs_get_block_group(used_bg);
7377
7378                 if (!delalloc)
7379                         return used_bg;
7380
7381                 if (down_read_trylock(&used_bg->data_rwsem))
7382                         return used_bg;
7383
7384                 spin_unlock(&cluster->refill_lock);
7385
7386                 down_read(&used_bg->data_rwsem);
7387
7388                 spin_lock(&cluster->refill_lock);
7389                 if (used_bg == cluster->block_group)
7390                         return used_bg;
7391
7392                 up_read(&used_bg->data_rwsem);
7393                 btrfs_put_block_group(used_bg);
7394         }
7395 }
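
/*
 * Note the retry dance above: taking used_bg->data_rwsem may block, so
 * the refill_lock is dropped first and the loop re-checks that the
 * cluster still points at the same block group once both locks are
 * held.  On success the cluster's refill_lock is left held for the
 * caller.
 */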
7396
7397 static inline void
7398 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
7399                          int delalloc)
7400 {
7401         if (delalloc)
7402                 up_read(&cache->data_rwsem);
7403         btrfs_put_block_group(cache);
7404 }
7405
7406 /*
7407  * walks the btree of allocated extents and finds a hole of a given size.
7408  * The key ins is changed to record the hole:
7409  * ins->objectid == start position
7410  * ins->type == BTRFS_EXTENT_ITEM_KEY
7411  * ins->offset == the size of the hole.
7412  * Any available blocks before search_start are skipped.
7413  *
7414  * If there is no suitable free space, we record the max size of the
7415  * largest free space extent we encountered during the search.
7416  */
7417 static noinline int find_free_extent(struct btrfs_root *orig_root,
7418                                      u64 num_bytes, u64 empty_size,
7419                                      u64 hint_byte, struct btrfs_key *ins,
7420                                      u64 flags, int delalloc)
7421 {
7422         int ret = 0;
7423         struct btrfs_root *root = orig_root->fs_info->extent_root;
7424         struct btrfs_free_cluster *last_ptr = NULL;
7425         struct btrfs_block_group_cache *block_group = NULL;
7426         u64 search_start = 0;
7427         u64 max_extent_size = 0;
7428         u64 empty_cluster = 0;
7429         struct btrfs_space_info *space_info;
7430         int loop = 0;
7431         int index = __get_raid_index(flags);
7432         int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
7433                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
7434         bool failed_cluster_refill = false;
7435         bool failed_alloc = false;
7436         bool use_cluster = true;
7437         bool have_caching_bg = false;
7438         bool orig_have_caching_bg = false;
7439         bool full_search = false;
7440
7441         WARN_ON(num_bytes < root->sectorsize);
7442         ins->type = BTRFS_EXTENT_ITEM_KEY;
7443         ins->objectid = 0;
7444         ins->offset = 0;
7445
7446         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
7447
7448         space_info = __find_space_info(root->fs_info, flags);
7449         if (!space_info) {
7450                 btrfs_err(root->fs_info, "No space info for %llu", flags);
7451                 return -ENOSPC;
7452         }
7453
7454         /*
7455          * If our free space is heavily fragmented we may not be able to make
7456          * big contiguous allocations, so instead of doing the expensive search
7457          * for free space, simply return ENOSPC with our max_extent_size so we
7458          * can go ahead and search for a more manageable chunk.
7459          *
7460          * If our max_extent_size is large enough for our allocation simply
7461          * disable clustering since we will likely not be able to find enough
7462          * space to create a cluster, and would only add latency trying.
7463          */
7464         if (unlikely(space_info->max_extent_size)) {
7465                 spin_lock(&space_info->lock);
7466                 if (space_info->max_extent_size &&
7467                     num_bytes > space_info->max_extent_size) {
7468                         ins->offset = space_info->max_extent_size;
7469                         spin_unlock(&space_info->lock);
7470                         return -ENOSPC;
7471                 } else if (space_info->max_extent_size) {
7472                         use_cluster = false;
7473                 }
7474                 spin_unlock(&space_info->lock);
7475         }
7476
7477         last_ptr = fetch_cluster_info(orig_root, space_info, &empty_cluster);
7478         if (last_ptr) {
7479                 spin_lock(&last_ptr->lock);
7480                 if (last_ptr->block_group)
7481                         hint_byte = last_ptr->window_start;
7482                 if (last_ptr->fragmented) {
7483                         /*
7484                          * We still set window_start so we can keep track of the
7485                          * last place we found an allocation to try and save
7486                          * some time.
7487                          */
7488                         hint_byte = last_ptr->window_start;
7489                         use_cluster = false;
7490                 }
7491                 spin_unlock(&last_ptr->lock);
7492         }
7493
7494         search_start = max(search_start, first_logical_byte(root, 0));
7495         search_start = max(search_start, hint_byte);
7496         if (search_start == hint_byte) {
7497                 block_group = btrfs_lookup_block_group(root->fs_info,
7498                                                        search_start);
7499                 /*
7500                  * we don't want to use the block group if it doesn't match our
7501                  * allocation bits, or if it's not cached.
7502                  *
7503                  * However if we are re-searching with an ideal block group
7504                  * picked out then we don't care that the block group is cached.
7505                  */
7506                 if (block_group && block_group_bits(block_group, flags) &&
7507                     block_group->cached != BTRFS_CACHE_NO) {
7508                         down_read(&space_info->groups_sem);
7509                         if (list_empty(&block_group->list) ||
7510                             block_group->ro) {
7511                                 /*
7512                                  * someone is removing this block group,
7513                                  * we can't jump into the have_block_group
7514                                  * target because our list pointers are not
7515                                  * valid
7516                                  */
7517                                 btrfs_put_block_group(block_group);
7518                                 up_read(&space_info->groups_sem);
7519                         } else {
7520                                 index = get_block_group_index(block_group);
7521                                 btrfs_lock_block_group(block_group, delalloc);
7522                                 goto have_block_group;
7523                         }
7524                 } else if (block_group) {
7525                         btrfs_put_block_group(block_group);
7526                 }
7527         }
7528 search:
7529         have_caching_bg = false;
7530         if (index == 0 || index == __get_raid_index(flags))
7531                 full_search = true;
7532         down_read(&space_info->groups_sem);
7533         list_for_each_entry(block_group, &space_info->block_groups[index],
7534                             list) {
7535                 u64 offset;
7536                 int cached;
7537
7538                 btrfs_grab_block_group(block_group, delalloc);
7539                 search_start = block_group->key.objectid;
7540
7541                 /*
7542                  * this can happen if we end up cycling through all the
7543                  * raid types, but we want to make sure we only allocate
7544                  * for the proper type.
7545                  */
7546                 if (!block_group_bits(block_group, flags)) {
7547                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
7548                                     BTRFS_BLOCK_GROUP_RAID1 |
7549                                     BTRFS_BLOCK_GROUP_RAID5 |
7550                                     BTRFS_BLOCK_GROUP_RAID6 |
7551                                     BTRFS_BLOCK_GROUP_RAID10;
7552
7553                         /*
7554                          * if they asked for extra copies and this block group
7555                          * doesn't provide them, bail.  This does allow us to
7556                          * fill raid0 from raid1.
7557                          */
7558                         if ((flags & extra) && !(block_group->flags & extra))
7559                                 goto loop;
7560                 }
7561
7562 have_block_group:
7563                 cached = block_group_cache_done(block_group);
7564                 if (unlikely(!cached)) {
7565                         have_caching_bg = true;
7566                         ret = cache_block_group(block_group, 0);
7567                         BUG_ON(ret < 0);
7568                         ret = 0;
7569                 }
7570
7571                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
7572                         goto loop;
7573                 if (unlikely(block_group->ro))
7574                         goto loop;
7575
7576                 /*
7577                  * OK, we want to try to use the cluster allocator, so
7578                  * let's look there
7579                  */
7580                 if (last_ptr && use_cluster) {
7581                         struct btrfs_block_group_cache *used_block_group;
7582                         unsigned long aligned_cluster;
7583                         /*
7584                          * the refill lock keeps out other
7585                          * people trying to start a new cluster
7586                          */
7587                         used_block_group = btrfs_lock_cluster(block_group,
7588                                                               last_ptr,
7589                                                               delalloc);
7590                         if (!used_block_group)
7591                                 goto refill_cluster;
7592
7593                         if (used_block_group != block_group &&
7594                             (used_block_group->ro ||
7595                              !block_group_bits(used_block_group, flags)))
7596                                 goto release_cluster;
7597
7598                         offset = btrfs_alloc_from_cluster(used_block_group,
7599                                                 last_ptr,
7600                                                 num_bytes,
7601                                                 used_block_group->key.objectid,
7602                                                 &max_extent_size);
7603                         if (offset) {
7604                                 /* we have a block, we're done */
7605                                 spin_unlock(&last_ptr->refill_lock);
7606                                 trace_btrfs_reserve_extent_cluster(root,
7607                                                 used_block_group,
7608                                                 search_start, num_bytes);
7609                                 if (used_block_group != block_group) {
7610                                         btrfs_release_block_group(block_group,
7611                                                                   delalloc);
7612                                         block_group = used_block_group;
7613                                 }
7614                                 goto checks;
7615                         }
7616
7617                         WARN_ON(last_ptr->block_group != used_block_group);
7618 release_cluster:
7619                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
7620                          * set up a new cluster, so let's just skip it
7621                          * and let the allocator find whatever block
7622                          * it can find.  If we reach this point, we
7623                          * will have tried the cluster allocator
7624                          * plenty of times and not have found
7625                          * anything, so we are likely way too
7626                          * fragmented for the clustering stuff to find
7627                          * anything.
7628                          *
7629                          * However, if the cluster is taken from the
7630                          * current block group, release the cluster
7631                          * first, so that we stand a better chance of
7632                          * succeeding in the unclustered
7633                          * allocation.  */
7634                         if (loop >= LOOP_NO_EMPTY_SIZE &&
7635                             used_block_group != block_group) {
7636                                 spin_unlock(&last_ptr->refill_lock);
7637                                 btrfs_release_block_group(used_block_group,
7638                                                           delalloc);
7639                                 goto unclustered_alloc;
7640                         }
7641
7642                         /*
7643                          * this cluster didn't work out, free it and
7644                          * start over
7645                          */
7646                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7647
7648                         if (used_block_group != block_group)
7649                                 btrfs_release_block_group(used_block_group,
7650                                                           delalloc);
7651 refill_cluster:
7652                         if (loop >= LOOP_NO_EMPTY_SIZE) {
7653                                 spin_unlock(&last_ptr->refill_lock);
7654                                 goto unclustered_alloc;
7655                         }
7656
7657                         aligned_cluster = max_t(unsigned long,
7658                                                 empty_cluster + empty_size,
7659                                               block_group->full_stripe_len);
7660
7661                         /* allocate a cluster in this block group */
7662                         ret = btrfs_find_space_cluster(root, block_group,
7663                                                        last_ptr, search_start,
7664                                                        num_bytes,
7665                                                        aligned_cluster);
7666                         if (ret == 0) {
7667                                 /*
7668                                  * now pull our allocation out of this
7669                                  * cluster
7670                                  */
7671                                 offset = btrfs_alloc_from_cluster(block_group,
7672                                                         last_ptr,
7673                                                         num_bytes,
7674                                                         search_start,
7675                                                         &max_extent_size);
7676                                 if (offset) {
7677                                         /* we found one, proceed */
7678                                         spin_unlock(&last_ptr->refill_lock);
7679                                         trace_btrfs_reserve_extent_cluster(root,
7680                                                 block_group, search_start,
7681                                                 num_bytes);
7682                                         goto checks;
7683                                 }
7684                         } else if (!cached && loop > LOOP_CACHING_NOWAIT &&
7685                                    !failed_cluster_refill) {
7686                                 spin_unlock(&last_ptr->refill_lock);
7687
7688                                 failed_cluster_refill = true;
7689                                 wait_block_group_cache_progress(block_group,
7690                                        num_bytes + empty_cluster + empty_size);
7691                                 goto have_block_group;
7692                         }
7693
7694                         /*
7695                          * at this point we either didn't find a cluster
7696                          * or we weren't able to allocate a block from our
7697                          * cluster.  Free the cluster we've been trying
7698                          * to use, and go to the next block group
7699                          */
7700                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7701                         spin_unlock(&last_ptr->refill_lock);
7702                         goto loop;
7703                 }
7704
7705 unclustered_alloc:
7706                 /*
7707                  * We are doing an unclustered alloc, set the fragmented flag so
7708                  * we don't bother trying to set up a cluster again until we get
7709                  * more space.
7710                  */
7711                 if (unlikely(last_ptr)) {
7712                         spin_lock(&last_ptr->lock);
7713                         last_ptr->fragmented = 1;
7714                         spin_unlock(&last_ptr->lock);
7715                 }
7716                 spin_lock(&block_group->free_space_ctl->tree_lock);
7717                 if (cached &&
7718                     block_group->free_space_ctl->free_space <
7719                     num_bytes + empty_cluster + empty_size) {
7720                         if (block_group->free_space_ctl->free_space >
7721                             max_extent_size)
7722                                 max_extent_size =
7723                                         block_group->free_space_ctl->free_space;
7724                         spin_unlock(&block_group->free_space_ctl->tree_lock);
7725                         goto loop;
7726                 }
7727                 spin_unlock(&block_group->free_space_ctl->tree_lock);
7728
7729                 offset = btrfs_find_space_for_alloc(block_group, search_start,
7730                                                     num_bytes, empty_size,
7731                                                     &max_extent_size);
7732                 /*
7733                  * If we didn't find a chunk, and we haven't failed on this
7734                  * block group before, and this block group is in the middle of
7735                  * caching and we are ok with waiting, then go ahead and wait
7736                  * for progress to be made, and set failed_alloc to true.
7737                  *
7738                  * If failed_alloc is true then we've already waited on this
7739                  * block group once and should move on to the next block group.
7740                  */
7741                 if (!offset && !failed_alloc && !cached &&
7742                     loop > LOOP_CACHING_NOWAIT) {
7743                         wait_block_group_cache_progress(block_group,
7744                                                 num_bytes + empty_size);
7745                         failed_alloc = true;
7746                         goto have_block_group;
7747                 } else if (!offset) {
7748                         goto loop;
7749                 }
7750 checks:
7751                 search_start = ALIGN(offset, root->stripesize);
7752
7753                 /* move on to the next group */
7754                 if (search_start + num_bytes >
7755                     block_group->key.objectid + block_group->key.offset) {
7756                         btrfs_add_free_space(block_group, offset, num_bytes);
7757                         goto loop;
7758                 }
7759
7760                 if (offset < search_start)
7761                         btrfs_add_free_space(block_group, offset,
7762                                              search_start - offset);
7763                 BUG_ON(offset > search_start);
7764
7765                 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
7766                                                   alloc_type, delalloc);
7767                 if (ret == -EAGAIN) {
7768                         btrfs_add_free_space(block_group, offset, num_bytes);
7769                         goto loop;
7770                 }
7771                 btrfs_inc_block_group_reservations(block_group);
7772
7773                 /* we are all good, let's return */
7774                 ins->objectid = search_start;
7775                 ins->offset = num_bytes;
7776
7777                 trace_btrfs_reserve_extent(orig_root, block_group,
7778                                            search_start, num_bytes);
7779                 btrfs_release_block_group(block_group, delalloc);
7780                 break;
7781 loop:
7782                 failed_cluster_refill = false;
7783                 failed_alloc = false;
7784                 BUG_ON(index != get_block_group_index(block_group));
7785                 btrfs_release_block_group(block_group, delalloc);
7786         }
7787         up_read(&space_info->groups_sem);
7788
7789         if ((loop == LOOP_CACHING_NOWAIT) && have_caching_bg &&
7790             !orig_have_caching_bg)
7791                 orig_have_caching_bg = true;
7792
7793         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
7794                 goto search;
7795
7796         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
7797                 goto search;
7798
7799         /*
7800          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
7801          *                      caching kthreads as we move along
7802          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
7803          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
7804          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
7805          *                      again
7806          */
7807         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
7808                 index = 0;
7809                 if (loop == LOOP_CACHING_NOWAIT) {
7810                         /*
7811                          * We want to skip the LOOP_CACHING_WAIT step if we
7812                          * don't have any uncached bgs and we've already done a
7813                          * full search through.
7814                          */
7815                         if (orig_have_caching_bg || !full_search)
7816                                 loop = LOOP_CACHING_WAIT;
7817                         else
7818                                 loop = LOOP_ALLOC_CHUNK;
7819                 } else {
7820                         loop++;
7821                 }
7822
7823                 if (loop == LOOP_ALLOC_CHUNK) {
7824                         struct btrfs_trans_handle *trans;
7825                         int exist = 0;
7826
7827                         trans = current->journal_info;
7828                         if (trans)
7829                                 exist = 1;
7830                         else
7831                                 trans = btrfs_join_transaction(root);
7832
7833                         if (IS_ERR(trans)) {
7834                                 ret = PTR_ERR(trans);
7835                                 goto out;
7836                         }
7837
7838                         ret = do_chunk_alloc(trans, root, flags,
7839                                              CHUNK_ALLOC_FORCE);
7840
7841                         /*
7842                          * If we can't allocate a new chunk, we've already
7843                          * looped through at least once, so move on to the
7844                          * NO_EMPTY_SIZE case.
7845                          */
7846                         if (ret == -ENOSPC)
7847                                 loop = LOOP_NO_EMPTY_SIZE;
7848
7849                         /*
7850                          * Do not bail out on ENOSPC since there is
7851                          * still more we can try below.
7852                          */
7853                         if (ret < 0 && ret != -ENOSPC)
7854                                 btrfs_abort_transaction(trans,
7855                                                         root, ret);
7856                         else
7857                                 ret = 0;
7858                         if (!exist)
7859                                 btrfs_end_transaction(trans, root);
7860                         if (ret)
7861                                 goto out;
7862                 }
7863
7864                 if (loop == LOOP_NO_EMPTY_SIZE) {
7865                         /*
7866                          * Don't loop again if we already have no empty_size and
7867                          * no empty_cluster.
7868                          */
7869                         if (empty_size == 0 && empty_cluster == 0) {
7871                                 ret = -ENOSPC;
7872                                 goto out;
7873                         }
7874                         empty_size = 0;
7875                         empty_cluster = 0;
7876                 }
7877
7878                 goto search;
7879         } else if (!ins->objectid) {
7880                 ret = -ENOSPC;
7881         } else if (ins->objectid) {
7882                 if (!use_cluster && last_ptr) {
7883                         spin_lock(&last_ptr->lock);
7884                         last_ptr->window_start = ins->objectid;
7885                         spin_unlock(&last_ptr->lock);
7886                 }
7887                 ret = 0;
7888         }
7889 out:
7890         if (ret == -ENOSPC) {
7891                 spin_lock(&space_info->lock);
7892                 space_info->max_extent_size = max_extent_size;
7893                 spin_unlock(&space_info->lock);
7894                 ins->offset = max_extent_size;
7895         }
7896         return ret;
7897 }
7898
7899 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
7900                             int dump_block_groups)
7901 {
7902         struct btrfs_block_group_cache *cache;
7903         int index = 0;
7904
7905         spin_lock(&info->lock);
7906         printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
7907                info->flags,
7908                info->total_bytes - info->bytes_used - info->bytes_pinned -
7909                info->bytes_reserved - info->bytes_readonly,
7910                (info->full) ? "" : "not ");
7911         printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu\n",
7913                info->total_bytes, info->bytes_used, info->bytes_pinned,
7914                info->bytes_reserved, info->bytes_may_use,
7915                info->bytes_readonly);
7916         spin_unlock(&info->lock);
7917
7918         if (!dump_block_groups)
7919                 return;
7920
7921         down_read(&info->groups_sem);
7922 again:
7923         list_for_each_entry(cache, &info->block_groups[index], list) {
7924                 spin_lock(&cache->lock);
7925                 printk(KERN_INFO "BTRFS: block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s\n",
7928                        cache->key.objectid, cache->key.offset,
7929                        btrfs_block_group_used(&cache->item), cache->pinned,
7930                        cache->reserved, cache->ro ? "[readonly]" : "");
7931                 btrfs_dump_free_space(cache, bytes);
7932                 spin_unlock(&cache->lock);
7933         }
7934         if (++index < BTRFS_NR_RAID_TYPES)
7935                 goto again;
7936         up_read(&info->groups_sem);
7937 }
7938
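/*
 * Retry sketch for the -ENOSPC path below (illustrative numbers only):
 * with num_bytes = 1MiB, min_alloc_size = 64KiB and ins->offset (the
 * largest free extent seen) at 300KiB, the retry asks for
 * min(512KiB, 300KiB) rounded down to the sectorsize and clamped up to
 * min_alloc_size; final_tried is set once min_alloc_size itself is
 * attempted.
 */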
7939 int btrfs_reserve_extent(struct btrfs_root *root,
7940                          u64 num_bytes, u64 min_alloc_size,
7941                          u64 empty_size, u64 hint_byte,
7942                          struct btrfs_key *ins, int is_data, int delalloc)
7943 {
7944         bool final_tried = num_bytes == min_alloc_size;
7945         u64 flags;
7946         int ret;
7947
7948         flags = btrfs_get_alloc_profile(root, is_data);
7949 again:
7950         WARN_ON(num_bytes < root->sectorsize);
7951         ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
7952                                flags, delalloc);
7953         if (!ret && !is_data) {
7954                 btrfs_dec_block_group_reservations(root->fs_info,
7955                                                    ins->objectid);
7956         } else if (ret == -ENOSPC) {
7957                 if (!final_tried && ins->offset) {
7958                         num_bytes = min(num_bytes >> 1, ins->offset);
7959                         num_bytes = round_down(num_bytes, root->sectorsize);
7960                         num_bytes = max(num_bytes, min_alloc_size);
7961                         if (num_bytes == min_alloc_size)
7962                                 final_tried = true;
7963                         goto again;
7964                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7965                         struct btrfs_space_info *sinfo;
7966
7967                         sinfo = __find_space_info(root->fs_info, flags);
7968                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
7969                                 flags, num_bytes);
7970                         if (sinfo)
7971                                 dump_space_info(sinfo, num_bytes, 1);
7972                 }
7973         }
7974
7975         return ret;
7976 }
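
/*
 * Illustrative use (a sketch with a hypothetical caller, not code from
 * this file): reserve a 1MiB data extent, allowing fallback down to
 * 64KiB under pressure. On success, ins.objectid holds the start bytenr
 * and ins.offset the length actually reserved.
 *
 *	struct btrfs_key ins;
 *	int ret;
 *
 *	ret = btrfs_reserve_extent(root, SZ_1M, SZ_64K, 0, 0, &ins, 1, 0);
 *
 * -ENOSPC here means even min_alloc_size could not be satisfied.
 */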
7977
7978 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
7979                                         u64 start, u64 len,
7980                                         int pin, int delalloc)
7981 {
7982         struct btrfs_block_group_cache *cache;
7983         int ret = 0;
7984
7985         cache = btrfs_lookup_block_group(root->fs_info, start);
7986         if (!cache) {
7987                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
7988                         start);
7989                 return -ENOSPC;
7990         }
7991
7992         if (pin) {
7993                 pin_down_extent(root, cache, start, len, 1);
7994         } else {
7995                 if (btrfs_test_opt(root, DISCARD))
7996                         ret = btrfs_discard_extent(root, start, len, NULL);
7997                 btrfs_add_free_space(cache, start, len);
7998                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
7999                 trace_btrfs_reserved_extent_free(root, start, len);
8000         }
8001
8002         btrfs_put_block_group(cache);
8003         return ret;
8004 }
8005
8006 int btrfs_free_reserved_extent(struct btrfs_root *root,
8007                                u64 start, u64 len, int delalloc)
8008 {
8009         return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
8010 }
8011
8012 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
8013                                        u64 start, u64 len)
8014 {
8015         return __btrfs_free_reserved_extent(root, start, len, 1, 0);
8016 }
8017
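/*
 * Sketch of what this helper inserts (matching the pointer arithmetic
 * on extent_item + 1 below):
 *
 *	[ btrfs_extent_item ][ inline ref: type + payload ]
 *
 * With parent > 0 the payload is the parent bytenr plus a
 * btrfs_shared_data_ref count; otherwise a btrfs_extent_data_ref
 * (root / objectid / offset / count) starts at the inline ref's offset
 * field.
 */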
8018 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
8019                                       struct btrfs_root *root,
8020                                       u64 parent, u64 root_objectid,
8021                                       u64 flags, u64 owner, u64 offset,
8022                                       struct btrfs_key *ins, int ref_mod)
8023 {
8024         int ret;
8025         struct btrfs_fs_info *fs_info = root->fs_info;
8026         struct btrfs_extent_item *extent_item;
8027         struct btrfs_extent_inline_ref *iref;
8028         struct btrfs_path *path;
8029         struct extent_buffer *leaf;
8030         int type;
8031         u32 size;
8032
8033         if (parent > 0)
8034                 type = BTRFS_SHARED_DATA_REF_KEY;
8035         else
8036                 type = BTRFS_EXTENT_DATA_REF_KEY;
8037
8038         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
8039
8040         path = btrfs_alloc_path();
8041         if (!path)
8042                 return -ENOMEM;
8043
8044         path->leave_spinning = 1;
8045         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
8046                                       ins, size);
8047         if (ret) {
8048                 btrfs_free_path(path);
8049                 return ret;
8050         }
8051
8052         leaf = path->nodes[0];
8053         extent_item = btrfs_item_ptr(leaf, path->slots[0],
8054                                      struct btrfs_extent_item);
8055         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
8056         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
8057         btrfs_set_extent_flags(leaf, extent_item,
8058                                flags | BTRFS_EXTENT_FLAG_DATA);
8059
8060         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
8061         btrfs_set_extent_inline_ref_type(leaf, iref, type);
8062         if (parent > 0) {
8063                 struct btrfs_shared_data_ref *ref;
8064                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
8065                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
8066                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
8067         } else {
8068                 struct btrfs_extent_data_ref *ref;
8069                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
8070                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
8071                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
8072                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
8073                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
8074         }
8075
8076         btrfs_mark_buffer_dirty(path->nodes[0]);
8077         btrfs_free_path(path);
8078
8079         ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
8080                                           ins->offset);
8081         if (ret)
8082                 return ret;
8083
8084         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
8085         if (ret) { /* -ENOENT, logic error */
8086                 btrfs_err(fs_info, "update block group failed for %llu %llu",
8087                         ins->objectid, ins->offset);
8088                 BUG();
8089         }
8090         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
8091         return ret;
8092 }
8093
8094 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
8095                                      struct btrfs_root *root,
8096                                      u64 parent, u64 root_objectid,
8097                                      u64 flags, struct btrfs_disk_key *key,
8098                                      int level, struct btrfs_key *ins)
8099 {
8100         int ret;
8101         struct btrfs_fs_info *fs_info = root->fs_info;
8102         struct btrfs_extent_item *extent_item;
8103         struct btrfs_tree_block_info *block_info;
8104         struct btrfs_extent_inline_ref *iref;
8105         struct btrfs_path *path;
8106         struct extent_buffer *leaf;
8107         u32 size = sizeof(*extent_item) + sizeof(*iref);
8108         u64 num_bytes = ins->offset;
8109         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
8110                                                  SKINNY_METADATA);
8111
8112         if (!skinny_metadata)
8113                 size += sizeof(*block_info);
8114
8115         path = btrfs_alloc_path();
8116         if (!path) {
8117                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
8118                                                    root->nodesize);
8119                 return -ENOMEM;
8120         }
8121
8122         path->leave_spinning = 1;
8123         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
8124                                       ins, size);
8125         if (ret) {
8126                 btrfs_free_path(path);
8127                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
8128                                                    root->nodesize);
8129                 return ret;
8130         }
8131
8132         leaf = path->nodes[0];
8133         extent_item = btrfs_item_ptr(leaf, path->slots[0],
8134                                      struct btrfs_extent_item);
8135         btrfs_set_extent_refs(leaf, extent_item, 1);
8136         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
8137         btrfs_set_extent_flags(leaf, extent_item,
8138                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
8139
8140         if (skinny_metadata) {
8141                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
8142                 num_bytes = root->nodesize;
8143         } else {
8144                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
8145                 btrfs_set_tree_block_key(leaf, block_info, key);
8146                 btrfs_set_tree_block_level(leaf, block_info, level);
8147                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
8148         }
8149
8150         if (parent > 0) {
8151                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
8152                 btrfs_set_extent_inline_ref_type(leaf, iref,
8153                                                  BTRFS_SHARED_BLOCK_REF_KEY);
8154                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
8155         } else {
8156                 btrfs_set_extent_inline_ref_type(leaf, iref,
8157                                                  BTRFS_TREE_BLOCK_REF_KEY);
8158                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
8159         }
8160
8161         btrfs_mark_buffer_dirty(leaf);
8162         btrfs_free_path(path);
8163
8164         ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
8165                                           num_bytes);
8166         if (ret)
8167                 return ret;
8168
8169         ret = update_block_group(trans, root, ins->objectid, root->nodesize,
8170                                  1);
8171         if (ret) { /* -ENOENT, logic error */
8172                 btrfs_err(fs_info, "update block group failed for %llu %llu",
8173                         ins->objectid, num_bytes);
8174                 BUG();
8175         }
8176
8177         trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
8178         return ret;
8179 }
8180
8181 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
8182                                      struct btrfs_root *root,
8183                                      u64 root_objectid, u64 owner,
8184                                      u64 offset, u64 ram_bytes,
8185                                      struct btrfs_key *ins)
8186 {
8187         int ret;
8188
8189         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
8190
8191         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
8192                                          ins->offset, 0,
8193                                          root_objectid, owner, offset,
8194                                          ram_bytes, BTRFS_ADD_DELAYED_EXTENT,
8195                                          NULL);
8196         return ret;
8197 }
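
/*
 * Note: the helper above only queues a delayed ref with
 * BTRFS_ADD_DELAYED_EXTENT; the extent item itself is inserted later
 * when delayed refs are run (see alloc_reserved_file_extent()), so no
 * extent tree modification happens here.
 */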
8198
8199 /*
8200  * this is used by the tree logging recovery code.  It records that
8201  * an extent has been allocated and makes sure to clear the free
8202  * space cache bits as well.
8203  */
8204 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
8205                                    struct btrfs_root *root,
8206                                    u64 root_objectid, u64 owner, u64 offset,
8207                                    struct btrfs_key *ins)
8208 {
8209         int ret;
8210         struct btrfs_block_group_cache *block_group;
8211
8212         /*
8213          * Mixed block groups will have their extents excluded before
8214          * processing the log, so we only need to do the exclude dance if this fs isn't mixed.
8215          */
8216         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
8217                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
8218                 if (ret)
8219                         return ret;
8220         }
8221
8222         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
8223         if (!block_group)
8224                 return -EINVAL;
8225
8226         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
8227                                           RESERVE_ALLOC_NO_ACCOUNT, 0);
8228         BUG_ON(ret); /* logic error */
8229         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
8230                                          0, owner, offset, ins, 1);
8231         btrfs_put_block_group(block_group);
8232         return ret;
8233 }
8234
8235 static struct extent_buffer *
8236 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
8237                       u64 bytenr, int level)
8238 {
8239         struct extent_buffer *buf;
8240
8241         buf = btrfs_find_create_tree_block(root, bytenr);
8242         if (IS_ERR(buf))
8243                 return buf;
8244
8245         btrfs_set_header_generation(buf, trans->transid);
8246         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
8247         btrfs_tree_lock(buf);
8248         clean_tree_block(trans, root->fs_info, buf);
8249         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
8250
8251         btrfs_set_lock_blocking(buf);
8252         set_extent_buffer_uptodate(buf);
8253
8254         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
8255                 buf->log_index = root->log_transid % 2;
8256                 /*
8257                  * we allow two log transactions at a time, so use
8258                  * different EXTENT bits to differentiate dirty pages.
8259                  */
8260                 if (buf->log_index == 0)
8261                         set_extent_dirty(&root->dirty_log_pages, buf->start,
8262                                         buf->start + buf->len - 1, GFP_NOFS);
8263                 else
8264                         set_extent_new(&root->dirty_log_pages, buf->start,
8265                                         buf->start + buf->len - 1);
8266         } else {
8267                 buf->log_index = -1;
8268                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
8269                          buf->start + buf->len - 1, GFP_NOFS);
8270         }
8271         trans->dirty = true;
8272         /* this returns a buffer locked for blocking */
8273         return buf;
8274 }
8275
8276 static struct btrfs_block_rsv *
8277 use_block_rsv(struct btrfs_trans_handle *trans,
8278               struct btrfs_root *root, u32 blocksize)
8279 {
8280         struct btrfs_block_rsv *block_rsv;
8281         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
8282         int ret;
8283         bool global_updated = false;
8284
8285         block_rsv = get_block_rsv(trans, root);
8286
8287         if (unlikely(block_rsv->size == 0))
8288                 goto try_reserve;
8289 again:
8290         ret = block_rsv_use_bytes(block_rsv, blocksize);
8291         if (!ret)
8292                 return block_rsv;
8293
8294         if (block_rsv->failfast)
8295                 return ERR_PTR(ret);
8296
8297         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
8298                 global_updated = true;
8299                 update_global_block_rsv(root->fs_info);
8300                 goto again;
8301         }
8302
8303         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
8304                 static DEFINE_RATELIMIT_STATE(_rs,
8305                                 DEFAULT_RATELIMIT_INTERVAL * 10,
8306                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
8307                 if (__ratelimit(&_rs))
8308                         WARN(1, KERN_DEBUG
8309                                 "BTRFS: block rsv returned %d\n", ret);
8310         }
8311 try_reserve:
8312         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
8313                                      BTRFS_RESERVE_NO_FLUSH);
8314         if (!ret)
8315                 return block_rsv;
8316         /*
8317          * If we couldn't reserve metadata bytes, try to use some from
8318          * the global reserve, provided this rsv shares its space info
8319          * with the global reserve.
8320          */
8321         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
8322             block_rsv->space_info == global_rsv->space_info) {
8323                 ret = block_rsv_use_bytes(global_rsv, blocksize);
8324                 if (!ret)
8325                         return global_rsv;
8326         }
8327         return ERR_PTR(ret);
8328 }
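
/*
 * Fallback order above (a summary): the rsv returned by get_block_rsv()
 * first (retrying once after refreshing the global rsv), then a direct
 * reserve_metadata_bytes() with BTRFS_RESERVE_NO_FLUSH, and finally the
 * global reserve when this rsv shares its space info. A successful
 * reservation is undone with unuse_block_rsv() on error paths.
 */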
8329
8330 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
8331                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
8332 {
8333         block_rsv_add_bytes(block_rsv, blocksize, 0);
8334         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
8335 }
8336
8337 /*
8338  * finds a free extent and does all the dirty work required for allocation
8339  * returns the tree buffer or an ERR_PTR on error.
8340  */
8341 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
8342                                         struct btrfs_root *root,
8343                                         u64 parent, u64 root_objectid,
8344                                         struct btrfs_disk_key *key, int level,
8345                                         u64 hint, u64 empty_size)
8346 {
8347         struct btrfs_key ins;
8348         struct btrfs_block_rsv *block_rsv;
8349         struct extent_buffer *buf;
8350         struct btrfs_delayed_extent_op *extent_op;
8351         u64 flags = 0;
8352         int ret;
8353         u32 blocksize = root->nodesize;
8354         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
8355                                                  SKINNY_METADATA);
8356
8357         if (btrfs_test_is_dummy_root(root)) {
8358                 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
8359                                             level);
8360                 if (!IS_ERR(buf))
8361                         root->alloc_bytenr += blocksize;
8362                 return buf;
8363         }
8364
8365         block_rsv = use_block_rsv(trans, root, blocksize);
8366         if (IS_ERR(block_rsv))
8367                 return ERR_CAST(block_rsv);
8368
8369         ret = btrfs_reserve_extent(root, blocksize, blocksize,
8370                                    empty_size, hint, &ins, 0, 0);
8371         if (ret)
8372                 goto out_unuse;
8373
8374         buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
8375         if (IS_ERR(buf)) {
8376                 ret = PTR_ERR(buf);
8377                 goto out_free_reserved;
8378         }
8379
8380         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
8381                 if (parent == 0)
8382                         parent = ins.objectid;
8383                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
8384         } else {
8385                 BUG_ON(parent > 0);
        }
8386
8387         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
8388                 extent_op = btrfs_alloc_delayed_extent_op();
8389                 if (!extent_op) {
8390                         ret = -ENOMEM;
8391                         goto out_free_buf;
8392                 }
8393                 if (key)
8394                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
8395                 else
8396                         memset(&extent_op->key, 0, sizeof(extent_op->key));
8397                 extent_op->flags_to_set = flags;
8398                 extent_op->update_key = !skinny_metadata;
8399                 extent_op->update_flags = true;
8400                 extent_op->is_data = false;
8401                 extent_op->level = level;
8402
8403                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
8404                                                  ins.objectid, ins.offset,
8405                                                  parent, root_objectid, level,
8406                                                  BTRFS_ADD_DELAYED_EXTENT,
8407                                                  extent_op);
8408                 if (ret)
8409                         goto out_free_delayed;
8410         }
8411         return buf;
8412
8413 out_free_delayed:
8414         btrfs_free_delayed_extent_op(extent_op);
8415 out_free_buf:
8416         free_extent_buffer(buf);
8417 out_free_reserved:
8418         btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
8419 out_unuse:
8420         unuse_block_rsv(root->fs_info, block_rsv, blocksize);
8421         return ERR_PTR(ret);
8422 }
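
/*
 * Illustrative call (a sketch with hypothetical context, not code from
 * this file): COWing a new level-0 block owned directly by the root,
 * with no parent backref, no key hint and no reserved padding:
 *
 *	buf = btrfs_alloc_tree_block(trans, root, 0,
 *				     root->root_key.objectid, NULL, 0, 0, 0);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 */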
8423
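/*
 * State for the subtree walk used by btrfs_drop_snapshot(): refs[] and
 * flags[] cache btrfs_lookup_extent_info() results per level, stage is
 * DROP_REFERENCE or UPDATE_BACKREF, shared_level records where the
 * stage switch happened, and the reada_* fields drive readahead in
 * reada_walk_down().
 */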
8424 struct walk_control {
8425         u64 refs[BTRFS_MAX_LEVEL];
8426         u64 flags[BTRFS_MAX_LEVEL];
8427         struct btrfs_key update_progress;
8428         int stage;
8429         int level;
8430         int shared_level;
8431         int update_ref;
8432         int keep_locks;
8433         int reada_slot;
8434         int reada_count;
8435         int for_reloc;
8436 };
8437
8438 #define DROP_REFERENCE  1
8439 #define UPDATE_BACKREF  2
8440
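/*
 * The two stages: DROP_REFERENCE walks down freeing blocks that are
 * only referenced by the tree being dropped, while UPDATE_BACKREF
 * converts implicit backrefs to full backrefs for a shared subtree
 * before the walk resumes dropping references.
 */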
8441 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
8442                                      struct btrfs_root *root,
8443                                      struct walk_control *wc,
8444                                      struct btrfs_path *path)
8445 {
8446         u64 bytenr;
8447         u64 generation;
8448         u64 refs;
8449         u64 flags;
8450         u32 nritems;
8451         u32 blocksize;
8452         struct btrfs_key key;
8453         struct extent_buffer *eb;
8454         int ret;
8455         int slot;
8456         int nread = 0;
8457
8458         if (path->slots[wc->level] < wc->reada_slot) {
8459                 wc->reada_count = wc->reada_count * 2 / 3;
8460                 wc->reada_count = max(wc->reada_count, 2);
8461         } else {
8462                 wc->reada_count = wc->reada_count * 3 / 2;
8463                 wc->reada_count = min_t(int, wc->reada_count,
8464                                         BTRFS_NODEPTRS_PER_BLOCK(root));
8465         }
8466
8467         eb = path->nodes[wc->level];
8468         nritems = btrfs_header_nritems(eb);
8469         blocksize = root->nodesize;
8470
8471         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
8472                 if (nread >= wc->reada_count)
8473                         break;
8474
8475                 cond_resched();
8476                 bytenr = btrfs_node_blockptr(eb, slot);
8477                 generation = btrfs_node_ptr_generation(eb, slot);
8478
8479                 if (slot == path->slots[wc->level])
8480                         goto reada;
8481
8482                 if (wc->stage == UPDATE_BACKREF &&
8483                     generation <= root->root_key.offset)
8484                         continue;
8485
8486                 /* We don't lock the tree block, it's OK to be racy here */
8487                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
8488                                                wc->level - 1, 1, &refs,
8489                                                &flags);
8490                 /* We don't care about errors in readahead. */
8491                 if (ret < 0)
8492                         continue;
8493                 BUG_ON(refs == 0);
8494
8495                 if (wc->stage == DROP_REFERENCE) {
8496                         if (refs == 1)
8497                                 goto reada;
8498
8499                         if (wc->level == 1 &&
8500                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8501                                 continue;
8502                         if (!wc->update_ref ||
8503                             generation <= root->root_key.offset)
8504                                 continue;
8505                         btrfs_node_key_to_cpu(eb, &key, slot);
8506                         ret = btrfs_comp_cpu_keys(&key,
8507                                                   &wc->update_progress);
8508                         if (ret < 0)
8509                                 continue;
8510                 } else {
8511                         if (wc->level == 1 &&
8512                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8513                                 continue;
8514                 }
8515 reada:
8516                 readahead_tree_block(root, bytenr);
8517                 nread++;
8518         }
8519         wc->reada_slot = slot;
8520 }
8521
8522 /*
8523  * These may not be seen by the usual inc/dec ref code so we have to
8524  * add them here.
8525  */
8526 static int record_one_subtree_extent(struct btrfs_trans_handle *trans,
8527                                      struct btrfs_root *root, u64 bytenr,
8528                                      u64 num_bytes)
8529 {
8530         struct btrfs_qgroup_extent_record *qrecord;
8531         struct btrfs_delayed_ref_root *delayed_refs;
8532
8533         qrecord = kmalloc(sizeof(*qrecord), GFP_NOFS);
8534         if (!qrecord)
8535                 return -ENOMEM;
8536
8537         qrecord->bytenr = bytenr;
8538         qrecord->num_bytes = num_bytes;
8539         qrecord->old_roots = NULL;
8540
8541         delayed_refs = &trans->transaction->delayed_refs;
8542         spin_lock(&delayed_refs->lock);
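        /*
         * If a record for this bytenr already exists, the insert helper
         * hands the existing one back and we free our duplicate; the
         * record is per-extent and idempotent.
         */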
8543         if (btrfs_qgroup_insert_dirty_extent(delayed_refs, qrecord))
8544                 kfree(qrecord);
8545         spin_unlock(&delayed_refs->lock);
8546
8547         return 0;
8548 }
8549
8550 static int account_leaf_items(struct btrfs_trans_handle *trans,
8551                               struct btrfs_root *root,
8552                               struct extent_buffer *eb)
8553 {
8554         int nr = btrfs_header_nritems(eb);
8555         int i, extent_type, ret;
8556         struct btrfs_key key;
8557         struct btrfs_file_extent_item *fi;
8558         u64 bytenr, num_bytes;
8559
8560         /* We can be called directly from walk_up_proc() */
8561         if (!root->fs_info->quota_enabled)
8562                 return 0;
8563
8564         for (i = 0; i < nr; i++) {
8565                 btrfs_item_key_to_cpu(eb, &key, i);
8566
8567                 if (key.type != BTRFS_EXTENT_DATA_KEY)
8568                         continue;
8569
8570                 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
8571                 /* filter out non-qgroup-accountable extents */
8572                 extent_type = btrfs_file_extent_type(eb, fi);
8573
8574                 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
8575                         continue;
8576
8577                 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
8578                 if (!bytenr)
8579                         continue;
8580
8581                 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
8582
8583                 ret = record_one_subtree_extent(trans, root, bytenr, num_bytes);
8584                 if (ret)
8585                         return ret;
8586         }
8587         return 0;
8588 }
8589
8590 /*
8591  * Walk up the tree from the bottom, freeing leaves and any interior
8592  * nodes which have had all slots visited. If a node (leaf or
8593  * interior) is freed, the node above it will have its slot
8594  * incremented. The root node will never be freed.
8595  *
8596  * At the end of this function, we should have a path which has all
8597  * slots incremented to the next position for a search. If we need to
8598  * read a new node it will be NULL and the node above it will have the
8599  * correct slot selected for a later read.
8600  *
8601  * If we increment the root node's slot counter past the number of
8602  * elements, 1 is returned to signal completion of the search.
8603  */
8604 static int adjust_slots_upwards(struct btrfs_root *root,
8605                                 struct btrfs_path *path, int root_level)
8606 {
8607         int level = 0;
8608         int nr, slot;
8609         struct extent_buffer *eb;
8610
8611         if (root_level == 0)
8612                 return 1;
8613
8614         while (level <= root_level) {
8615                 eb = path->nodes[level];
8616                 nr = btrfs_header_nritems(eb);
8617                 path->slots[level]++;
8618                 slot = path->slots[level];
8619                 if (slot >= nr || level == 0) {
8620                         /*
8621                          * Don't free the root - we will detect this
8622                          * condition after our loop and return a positive
8623                          * value for the caller to stop walking the tree.
8624                          */
8625                         if (level != root_level) {
8626                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8627                                 path->locks[level] = 0;
8628
8629                                 free_extent_buffer(eb);
8630                                 path->nodes[level] = NULL;
8631                                 path->slots[level] = 0;
8632                         }
8633                 } else {
8634                         /*
8635                          * We have a valid slot to walk back down
8636                          * from. Stop here so caller can process these
8637                          * new nodes.
8638                          */
8639                         break;
8640                 }
8641
8642                 level++;
8643         }
8644
8645         eb = path->nodes[root_level];
8646         if (path->slots[root_level] >= btrfs_header_nritems(eb))
8647                 return 1;
8648
8649         return 0;
8650 }
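
/*
 * Worked example (illustrative): with root_level == 2 the leaf is
 * always released, the level-1 slot is bumped, and if that node is
 * exhausted it is released too and the root's slot is bumped; once the
 * root's own slots run out, 1 is returned and the search is complete.
 */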
8651
8652 /*
8653  * root_eb is the subtree root and is locked before this function is called.
8654  */
8655 static int account_shared_subtree(struct btrfs_trans_handle *trans,
8656                                   struct btrfs_root *root,
8657                                   struct extent_buffer *root_eb,
8658                                   u64 root_gen,
8659                                   int root_level)
8660 {
8661         int ret = 0;
8662         int level;
8663         struct extent_buffer *eb = root_eb;
8664         struct btrfs_path *path = NULL;
8665
8666         BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
8667         BUG_ON(root_eb == NULL);
8668
8669         if (!root->fs_info->quota_enabled)
8670                 return 0;
8671
8672         if (!extent_buffer_uptodate(root_eb)) {
8673                 ret = btrfs_read_buffer(root_eb, root_gen);
8674                 if (ret)
8675                         goto out;
8676         }
8677
8678         if (root_level == 0) {
8679                 ret = account_leaf_items(trans, root, root_eb);
8680                 goto out;
8681         }
8682
8683         path = btrfs_alloc_path();
8684         if (!path)
8685                 return -ENOMEM;
8686
8687         /*
8688          * Walk down the tree.  Missing extent blocks are filled in as
8689          * we go. Metadata is accounted every time we read a new
8690          * extent block.
8691          *
8692          * When we reach a leaf, we account for file extent items in it,
8693          * walk back up the tree (adjusting slot pointers as we go)
8694          * and restart the search process.
8695          */
8696         extent_buffer_get(root_eb); /* For path */
8697         path->nodes[root_level] = root_eb;
8698         path->slots[root_level] = 0;
8699         path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
8700 walk_down:
8701         level = root_level;
8702         while (level >= 0) {
8703                 if (path->nodes[level] == NULL) {
8704                         int parent_slot;
8705                         u64 child_gen;
8706                         u64 child_bytenr;
8707
8708                         /*
8709                          * We need to get the child blockptr/gen from the
                         * parent before we can read it.
                         */
8710                         eb = path->nodes[level + 1];
8711                         parent_slot = path->slots[level + 1];
8712                         child_bytenr = btrfs_node_blockptr(eb, parent_slot);
8713                         child_gen = btrfs_node_ptr_generation(eb, parent_slot);
8714
8715                         eb = read_tree_block(root, child_bytenr, child_gen);
8716                         if (IS_ERR(eb)) {
8717                                 ret = PTR_ERR(eb);
8718                                 goto out;
8719                         } else if (!extent_buffer_uptodate(eb)) {
8720                                 free_extent_buffer(eb);
8721                                 ret = -EIO;
8722                                 goto out;
8723                         }
8724
8725                         path->nodes[level] = eb;
8726                         path->slots[level] = 0;
8727
8728                         btrfs_tree_read_lock(eb);
8729                         btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
8730                         path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
8731
8732                         ret = record_one_subtree_extent(trans, root, child_bytenr,
8733                                                         root->nodesize);
8734                         if (ret)
8735                                 goto out;
8736                 }
8737
8738                 if (level == 0) {
8739                         ret = account_leaf_items(trans, root, path->nodes[level]);
8740                         if (ret)
8741                                 goto out;
8742
8743                         /* Nonzero return here means we completed our search */
8744                         ret = adjust_slots_upwards(root, path, root_level);
8745                         if (ret)
8746                                 break;
8747
8748                         /* Restart search with new slots */
8749                         goto walk_down;
8750                 }
8751
8752                 level--;
8753         }
8754
8755         ret = 0;
8756 out:
8757         btrfs_free_path(path);
8758
8759         return ret;
8760 }
8761
8762 /*
8763  * helper to process tree block while walking down the tree.
8764  *
8765  * when wc->stage == UPDATE_BACKREF, this function updates
8766  * back refs for pointers in the block.
8767  *
8768  * NOTE: return value 1 means we should stop walking down.
8769  */
8770 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
8771                                    struct btrfs_root *root,
8772                                    struct btrfs_path *path,
8773                                    struct walk_control *wc, int lookup_info)
8774 {
8775         int level = wc->level;
8776         struct extent_buffer *eb = path->nodes[level];
8777         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8778         int ret;
8779
8780         if (wc->stage == UPDATE_BACKREF &&
8781             btrfs_header_owner(eb) != root->root_key.objectid)
8782                 return 1;
8783
8784         /*
8785          * when the reference count of a tree block is 1, it won't
8786          * increase again. once the full backref flag is set, we never clear it.
8787          */
8788         if (lookup_info &&
8789             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
8790              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
8791                 BUG_ON(!path->locks[level]);
8792                 ret = btrfs_lookup_extent_info(trans, root,
8793                                                eb->start, level, 1,
8794                                                &wc->refs[level],
8795                                                &wc->flags[level]);
8796                 BUG_ON(ret == -ENOMEM);
8797                 if (ret)
8798                         return ret;
8799                 BUG_ON(wc->refs[level] == 0);
8800         }
8801
8802         if (wc->stage == DROP_REFERENCE) {
8803                 if (wc->refs[level] > 1)
8804                         return 1;
8805
8806                 if (path->locks[level] && !wc->keep_locks) {
8807                         btrfs_tree_unlock_rw(eb, path->locks[level]);
8808                         path->locks[level] = 0;
8809                 }
8810                 return 0;
8811         }
8812
8813         /* wc->stage == UPDATE_BACKREF */
8814         if (!(wc->flags[level] & flag)) {
8815                 BUG_ON(!path->locks[level]);
8816                 ret = btrfs_inc_ref(trans, root, eb, 1);
8817                 BUG_ON(ret); /* -ENOMEM */
8818                 ret = btrfs_dec_ref(trans, root, eb, 0);
8819                 BUG_ON(ret); /* -ENOMEM */
8820                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
8821                                                   eb->len, flag,
8822                                                   btrfs_header_level(eb), 0);
8823                 BUG_ON(ret); /* -ENOMEM */
8824                 wc->flags[level] |= flag;
8825         }
8826
8827         /*
8828          * the block is shared by multiple trees, so it's not good to
8829          * keep the tree lock held
8830          */
8831         if (path->locks[level] && level > 0) {
8832                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8833                 path->locks[level] = 0;
8834         }
8835         return 0;
8836 }
8837
8838 /*
8839  * helper to process tree block pointer.
8840  *
8841  * when wc->stage == DROP_REFERENCE, this function checks the
8842  * reference count of the block pointed to. if the block
8843  * is shared and we need to update back refs for the subtree
8844  * rooted at the block, this function changes wc->stage to
8845  * UPDATE_BACKREF. if the block is shared and there is no
8846  * need to update back refs, this function drops the reference
8847  * to the block.
8848  *
8849  * NOTE: return value 1 means we should stop walking down.
8850  */
8851 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
8852                                  struct btrfs_root *root,
8853                                  struct btrfs_path *path,
8854                                  struct walk_control *wc, int *lookup_info)
8855 {
8856         u64 bytenr;
8857         u64 generation;
8858         u64 parent;
8859         u32 blocksize;
8860         struct btrfs_key key;
8861         struct extent_buffer *next;
8862         int level = wc->level;
8863         int reada = 0;
8864         int ret = 0;
8865         bool need_account = false;
8866
8867         generation = btrfs_node_ptr_generation(path->nodes[level],
8868                                                path->slots[level]);
8869         /*
8870          * if the lower level block was created before the snapshot
8871          * was created, we know there is no need to update back refs
8872          * for the subtree
8873          */
8874         if (wc->stage == UPDATE_BACKREF &&
8875             generation <= root->root_key.offset) {
8876                 *lookup_info = 1;
8877                 return 1;
8878         }
8879
8880         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
8881         blocksize = root->nodesize;
8882
8883         next = btrfs_find_tree_block(root->fs_info, bytenr);
8884         if (!next) {
8885                 next = btrfs_find_create_tree_block(root, bytenr);
8886                 if (IS_ERR(next))
8887                         return PTR_ERR(next);
8888
8889                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
8890                                                level - 1);
8891                 reada = 1;
8892         }
8893         btrfs_tree_lock(next);
8894         btrfs_set_lock_blocking(next);
8895
8896         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
8897                                        &wc->refs[level - 1],
8898                                        &wc->flags[level - 1]);
8899         if (ret < 0) {
8900                 btrfs_tree_unlock(next);
8901                 return ret;
8902         }
8903
8904         if (unlikely(wc->refs[level - 1] == 0)) {
8905                 btrfs_err(root->fs_info, "Missing references.");
8906                 BUG();
8907         }
8908         *lookup_info = 0;
8909
8910         if (wc->stage == DROP_REFERENCE) {
8911                 if (wc->refs[level - 1] > 1) {
8912                         need_account = true;
8913                         if (level == 1 &&
8914                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8915                                 goto skip;
8916
8917                         if (!wc->update_ref ||
8918                             generation <= root->root_key.offset)
8919                                 goto skip;
8920
8921                         btrfs_node_key_to_cpu(path->nodes[level], &key,
8922                                               path->slots[level]);
8923                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
8924                         if (ret < 0)
8925                                 goto skip;
8926
8927                         wc->stage = UPDATE_BACKREF;
8928                         wc->shared_level = level - 1;
8929                 }
8930         } else {
8931                 if (level == 1 &&
8932                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8933                         goto skip;
8934         }
8935
8936         if (!btrfs_buffer_uptodate(next, generation, 0)) {
8937                 btrfs_tree_unlock(next);
8938                 free_extent_buffer(next);
8939                 next = NULL;
8940                 *lookup_info = 1;
8941         }
8942
8943         if (!next) {
8944                 if (reada && level == 1)
8945                         reada_walk_down(trans, root, wc, path);
8946                 next = read_tree_block(root, bytenr, generation);
8947                 if (IS_ERR(next)) {
8948                         return PTR_ERR(next);
8949                 } else if (!extent_buffer_uptodate(next)) {
8950                         free_extent_buffer(next);
8951                         return -EIO;
8952                 }
8953                 btrfs_tree_lock(next);
8954                 btrfs_set_lock_blocking(next);
8955         }
8956
8957         level--;
8958         BUG_ON(level != btrfs_header_level(next));
8959         path->nodes[level] = next;
8960         path->slots[level] = 0;
8961         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8962         wc->level = level;
8963         if (wc->level == 1)
8964                 wc->reada_slot = 0;
8965         return 0;
8966 skip:
8967         wc->refs[level - 1] = 0;
8968         wc->flags[level - 1] = 0;
8969         if (wc->stage == DROP_REFERENCE) {
8970                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
8971                         parent = path->nodes[level]->start;
8972                 } else {
8973                         BUG_ON(root->root_key.objectid !=
8974                                btrfs_header_owner(path->nodes[level]));
8975                         parent = 0;
8976                 }
8977
8978                 if (need_account) {
8979                         ret = account_shared_subtree(trans, root, next,
8980                                                      generation, level - 1);
8981                         if (ret) {
8982                                 btrfs_err_rl(root->fs_info,
8983                                         "Error %d accounting shared subtree. Quota is out of sync, rescan required.",
8986                                         ret);
8987                         }
8988                 }
8989                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
8990                                 root->root_key.objectid, level - 1, 0);
8991                 BUG_ON(ret); /* -ENOMEM */
8992         }
8993         btrfs_tree_unlock(next);
8994         free_extent_buffer(next);
8995         *lookup_info = 1;
8996         return 1;
8997 }
8998
8999 /*
9000  * helper to process tree block while walking up the tree.
9001  *
9002  * when wc->stage == DROP_REFERENCE, this function drops
9003  * reference count on the block.
9004  *
9005  * when wc->stage == UPDATE_BACKREF, this function changes
9006  * wc->stage back to DROP_REFERENCE if we changed wc->stage
9007  * to UPDATE_BACKREF previously while processing the block.
9008  *
9009  * NOTE: return value 1 means we should stop walking up.
9010  */
9011 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
9012                                  struct btrfs_root *root,
9013                                  struct btrfs_path *path,
9014                                  struct walk_control *wc)
9015 {
9016         int ret;
9017         int level = wc->level;
9018         struct extent_buffer *eb = path->nodes[level];
9019         u64 parent = 0;
9020
9021         if (wc->stage == UPDATE_BACKREF) {
9022                 BUG_ON(wc->shared_level < level);
9023                 if (level < wc->shared_level)
9024                         goto out;
9025
9026                 ret = find_next_key(path, level + 1, &wc->update_progress);
9027                 if (ret > 0)
9028                         wc->update_ref = 0;
9029
9030                 wc->stage = DROP_REFERENCE;
9031                 wc->shared_level = -1;
9032                 path->slots[level] = 0;
9033
9034                 /*
9035                  * check reference count again if the block isn't locked.
9036                  * we should start walking down the tree again if reference
9037                  * count is one.
9038                  */
9039                 if (!path->locks[level]) {
9040                         BUG_ON(level == 0);
9041                         btrfs_tree_lock(eb);
9042                         btrfs_set_lock_blocking(eb);
9043                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9044
9045                         ret = btrfs_lookup_extent_info(trans, root,
9046                                                        eb->start, level, 1,
9047                                                        &wc->refs[level],
9048                                                        &wc->flags[level]);
9049                         if (ret < 0) {
9050                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
9051                                 path->locks[level] = 0;
9052                                 return ret;
9053                         }
9054                         BUG_ON(wc->refs[level] == 0);
9055                         if (wc->refs[level] == 1) {
9056                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
9057                                 path->locks[level] = 0;
9058                                 return 1;
9059                         }
9060                 }
9061         }
9062
9063         /* wc->stage == DROP_REFERENCE */
9064         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
9065
9066         if (wc->refs[level] == 1) {
9067                 if (level == 0) {
9068                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
9069                                 ret = btrfs_dec_ref(trans, root, eb, 1);
9070                         else
9071                                 ret = btrfs_dec_ref(trans, root, eb, 0);
9072                         BUG_ON(ret); /* -ENOMEM */
9073                         ret = account_leaf_items(trans, root, eb);
9074                         if (ret) {
9075                                 btrfs_err_rl(root->fs_info,
9076                                         "error %d accounting leaf items. Quota is out of sync, rescan required.",
9079                                         ret);
9080                         }
9081                 }
9082                 /* make block locked assertion in clean_tree_block happy */
9083                 if (!path->locks[level] &&
9084                     btrfs_header_generation(eb) == trans->transid) {
9085                         btrfs_tree_lock(eb);
9086                         btrfs_set_lock_blocking(eb);
9087                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9088                 }
9089                 clean_tree_block(trans, root->fs_info, eb);
9090         }
9091
9092         if (eb == root->node) {
9093                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
9094                         parent = eb->start;
9095                 else
9096                         BUG_ON(root->root_key.objectid !=
9097                                btrfs_header_owner(eb));
9098         } else {
9099                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
9100                         parent = path->nodes[level + 1]->start;
9101                 else
9102                         BUG_ON(root->root_key.objectid !=
9103                                btrfs_header_owner(path->nodes[level + 1]));
9104         }
9105
9106         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
9107 out:
9108         wc->refs[level] = 0;
9109         wc->flags[level] = 0;
9110         return 0;
9111 }
9112
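/*
 * Drive the walk downward: walk_down_proc() handles the block we are
 * sitting on, do_walk_down() steps into the child, and we stop at a
 * leaf, at a shared block, or when the current node's slots are
 * exhausted.
 */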
9113 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
9114                                    struct btrfs_root *root,
9115                                    struct btrfs_path *path,
9116                                    struct walk_control *wc)
9117 {
9118         int level = wc->level;
9119         int lookup_info = 1;
9120         int ret;
9121
9122         while (level >= 0) {
9123                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
9124                 if (ret > 0)
9125                         break;
9126
9127                 if (level == 0)
9128                         break;
9129
9130                 if (path->slots[level] >=
9131                     btrfs_header_nritems(path->nodes[level]))
9132                         break;
9133
9134                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
9135                 if (ret > 0) {
9136                         path->slots[level]++;
9137                         continue;
9138                 } else if (ret < 0)
9139                         return ret;
9140                 level = wc->level;
9141         }
9142         return 0;
9143 }
9144
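/*
 * Drive the walk upward: advance to the next slot in the current node
 * if one remains, otherwise let walk_up_proc() drop or convert the
 * block and climb one level, until max_level is reached.
 */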
9145 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
9146                                  struct btrfs_root *root,
9147                                  struct btrfs_path *path,
9148                                  struct walk_control *wc, int max_level)
9149 {
9150         int level = wc->level;
9151         int ret;
9152
9153         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
9154         while (level < max_level && path->nodes[level]) {
9155                 wc->level = level;
9156                 if (path->slots[level] + 1 <
9157                     btrfs_header_nritems(path->nodes[level])) {
9158                         path->slots[level]++;
9159                         return 0;
9160                 } else {
9161                         ret = walk_up_proc(trans, root, path, wc);
9162                         if (ret > 0)
9163                                 return 0;
9164
9165                         if (path->locks[level]) {
9166                                 btrfs_tree_unlock_rw(path->nodes[level],
9167                                                      path->locks[level]);
9168                                 path->locks[level] = 0;
9169                         }
9170                         free_extent_buffer(path->nodes[level]);
9171                         path->nodes[level] = NULL;
9172                         level++;
9173                 }
9174         }
9175         return 1;
9176 }
9177
9178 /*
9179  * drop a subvolume tree.
9180  *
9181  * this function traverses the tree, freeing any blocks that are only
9182  * referenced by the tree.
9183  *
9184  * when a shared tree block is found, this function decreases its
9185  * reference count by one. if update_ref is true, this function
9186  * also makes sure backrefs for the shared block and all lower level
9187  * blocks are properly updated.
9188  *
9189  * If called with for_reloc == 0, may exit early with -EAGAIN
9190  */
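/*
 * A minimal usage sketch (hypothetical caller, not taken from this
 * file); the cleaner thread drops a dead root roughly like:
 *
 *	ret = btrfs_drop_snapshot(root, NULL, 0, 0);
 *	if (ret == -EAGAIN)
 *		return;
 *
 * On -EAGAIN the root has been re-added to the dead roots list, so the
 * cleaner simply tries again on a later pass.  A NULL block_rsv means
 * the transaction keeps whatever reservation it started with.
 */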
9191 int btrfs_drop_snapshot(struct btrfs_root *root,
9192                          struct btrfs_block_rsv *block_rsv, int update_ref,
9193                          int for_reloc)
9194 {
9195         struct btrfs_path *path;
9196         struct btrfs_trans_handle *trans;
9197         struct btrfs_root *tree_root = root->fs_info->tree_root;
9198         struct btrfs_root_item *root_item = &root->root_item;
9199         struct walk_control *wc;
9200         struct btrfs_key key;
9201         int err = 0;
9202         int ret;
9203         int level;
9204         bool root_dropped = false;
9205
9206         btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
9207
9208         path = btrfs_alloc_path();
9209         if (!path) {
9210                 err = -ENOMEM;
9211                 goto out;
9212         }
9213
9214         wc = kzalloc(sizeof(*wc), GFP_NOFS);
9215         if (!wc) {
9216                 btrfs_free_path(path);
9217                 err = -ENOMEM;
9218                 goto out;
9219         }
9220
9221         trans = btrfs_start_transaction(tree_root, 0);
9222         if (IS_ERR(trans)) {
9223                 err = PTR_ERR(trans);
9224                 goto out_free;
9225         }
9226
9227         if (block_rsv)
9228                 trans->block_rsv = block_rsv;
9229
9230         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
9231                 level = btrfs_header_level(root->node);
9232                 path->nodes[level] = btrfs_lock_root_node(root);
9233                 btrfs_set_lock_blocking(path->nodes[level]);
9234                 path->slots[level] = 0;
9235                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9236                 memset(&wc->update_progress, 0,
9237                        sizeof(wc->update_progress));
9238         } else {
9239                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
9240                 memcpy(&wc->update_progress, &key,
9241                        sizeof(wc->update_progress));
9242
9243                 level = root_item->drop_level;
9244                 BUG_ON(level == 0);
9245                 path->lowest_level = level;
9246                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
9247                 path->lowest_level = 0;
9248                 if (ret < 0) {
9249                         err = ret;
9250                         goto out_end_trans;
9251                 }
9252                 WARN_ON(ret > 0);
9253
9254                 /*
9255                  * unlock our path; this is safe because only this
9256                  * function is allowed to delete this snapshot
9257                  */
9258                 btrfs_unlock_up_safe(path, 0);
9259
9260                 level = btrfs_header_level(root->node);
9261                 while (1) {
9262                         btrfs_tree_lock(path->nodes[level]);
9263                         btrfs_set_lock_blocking(path->nodes[level]);
9264                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9265
9266                         ret = btrfs_lookup_extent_info(trans, root,
9267                                                 path->nodes[level]->start,
9268                                                 level, 1, &wc->refs[level],
9269                                                 &wc->flags[level]);
9270                         if (ret < 0) {
9271                                 err = ret;
9272                                 goto out_end_trans;
9273                         }
9274                         BUG_ON(wc->refs[level] == 0);
9275
9276                         if (level == root_item->drop_level)
9277                                 break;
9278
9279                         btrfs_tree_unlock(path->nodes[level]);
9280                         path->locks[level] = 0;
9281                         WARN_ON(wc->refs[level] != 1);
9282                         level--;
9283                 }
9284         }
9285
9286         wc->level = level;
9287         wc->shared_level = -1;
9288         wc->stage = DROP_REFERENCE;
9289         wc->update_ref = update_ref;
9290         wc->keep_locks = 0;
9291         wc->for_reloc = for_reloc;
9292         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
9293
9294         while (1) {
9296                 ret = walk_down_tree(trans, root, path, wc);
9297                 if (ret < 0) {
9298                         err = ret;
9299                         break;
9300                 }
9301
9302                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
9303                 if (ret < 0) {
9304                         err = ret;
9305                         break;
9306                 }
9307
9308                 if (ret > 0) {
9309                         BUG_ON(wc->stage != DROP_REFERENCE);
9310                         break;
9311                 }
9312
9313                 if (wc->stage == DROP_REFERENCE) {
9314                         level = wc->level;
9315                         btrfs_node_key(path->nodes[level],
9316                                        &root_item->drop_progress,
9317                                        path->slots[level]);
9318                         root_item->drop_level = level;
9319                 }
9320
9321                 BUG_ON(wc->level == 0);
9322                 if (btrfs_should_end_transaction(trans, tree_root) ||
9323                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
9324                         ret = btrfs_update_root(trans, tree_root,
9325                                                 &root->root_key,
9326                                                 root_item);
9327                         if (ret) {
9328                                 btrfs_abort_transaction(trans, tree_root, ret);
9329                                 err = ret;
9330                                 goto out_end_trans;
9331                         }
9332
9333                         btrfs_end_transaction_throttle(trans, tree_root);
9334                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
9335                                 pr_debug("BTRFS: drop snapshot early exit\n");
9336                                 err = -EAGAIN;
9337                                 goto out_free;
9338                         }
9339
9340                         trans = btrfs_start_transaction(tree_root, 0);
9341                         if (IS_ERR(trans)) {
9342                                 err = PTR_ERR(trans);
9343                                 goto out_free;
9344                         }
9345                         if (block_rsv)
9346                                 trans->block_rsv = block_rsv;
9347                 }
9348         }
9349         btrfs_release_path(path);
9350         if (err)
9351                 goto out_end_trans;
9352
9353         ret = btrfs_del_root(trans, tree_root, &root->root_key);
9354         if (ret) {
9355                 btrfs_abort_transaction(trans, tree_root, ret);
9356                 goto out_end_trans;
9357         }
9358
9359         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
9360                 ret = btrfs_find_root(tree_root, &root->root_key, path,
9361                                       NULL, NULL);
9362                 if (ret < 0) {
9363                         btrfs_abort_transaction(trans, tree_root, ret);
9364                         err = ret;
9365                         goto out_end_trans;
9366                 } else if (ret > 0) {
9367                         /* if we fail to delete the orphan item this time
9368                          * around, it'll get picked up the next time.
9369                          *
9370                          * The most common failure here is just -ENOENT.
9371                          */
9372                         btrfs_del_orphan_item(trans, tree_root,
9373                                               root->root_key.objectid);
9374                 }
9375         }
9376
9377         if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
9378                 btrfs_add_dropped_root(trans, root);
9379         } else {
9380                 free_extent_buffer(root->node);
9381                 free_extent_buffer(root->commit_root);
9382                 btrfs_put_fs_root(root);
9383         }
9384         root_dropped = true;
9385 out_end_trans:
9386         btrfs_end_transaction_throttle(trans, tree_root);
9387 out_free:
9388         kfree(wc);
9389         btrfs_free_path(path);
9390 out:
9391         /*
9392          * If we need to stop dropping the snapshot for whatever reason, we
9393          * need to make sure to add it back to the dead root list so that we
9394          * keep trying to do the work later.  This also cleans up roots we
9395          * don't have in the radix (like when we recover after a power
9396          * failure or unmount) so we don't leak memory.
9397          */
9398         if (!for_reloc && !root_dropped)
9399                 btrfs_add_dead_root(root);
9400         if (err && err != -EAGAIN)
9401                 btrfs_handle_fs_error(root->fs_info, err, NULL);
9402         return err;
9403 }
9404
9405 /*
9406  * drop subtree rooted at tree block 'node'.
9407  *
9408  * NOTE: this function will unlock and release tree block 'node'.
9409  * It is only used by the relocation code.
9410  */
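/*
 * Locking sketch (a hypothetical call site, inferred from the asserts
 * below rather than taken from the relocation code itself): the caller
 * must hold tree locks on both buffers,
 *
 *	btrfs_tree_lock(parent);
 *	btrfs_tree_lock(node);
 *	ret = btrfs_drop_subtree(trans, reloc_root, node, parent);
 *
 * and on return 'node' has been unlocked and released, while 'parent'
 * is still locked and referenced by the caller.
 */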
9411 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
9412                         struct btrfs_root *root,
9413                         struct extent_buffer *node,
9414                         struct extent_buffer *parent)
9415 {
9416         struct btrfs_path *path;
9417         struct walk_control *wc;
9418         int level;
9419         int parent_level;
9420         int ret = 0;
9421         int wret;
9422
9423         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
9424
9425         path = btrfs_alloc_path();
9426         if (!path)
9427                 return -ENOMEM;
9428
9429         wc = kzalloc(sizeof(*wc), GFP_NOFS);
9430         if (!wc) {
9431                 btrfs_free_path(path);
9432                 return -ENOMEM;
9433         }
9434
9435         btrfs_assert_tree_locked(parent);
9436         parent_level = btrfs_header_level(parent);
9437         extent_buffer_get(parent);
9438         path->nodes[parent_level] = parent;
9439         path->slots[parent_level] = btrfs_header_nritems(parent);
9440
9441         btrfs_assert_tree_locked(node);
9442         level = btrfs_header_level(node);
9443         path->nodes[level] = node;
9444         path->slots[level] = 0;
9445         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9446
9447         wc->refs[parent_level] = 1;
9448         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
9449         wc->level = level;
9450         wc->shared_level = -1;
9451         wc->stage = DROP_REFERENCE;
9452         wc->update_ref = 0;
9453         wc->keep_locks = 1;
9454         wc->for_reloc = 1;
9455         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
9456
9457         while (1) {
9458                 wret = walk_down_tree(trans, root, path, wc);
9459                 if (wret < 0) {
9460                         ret = wret;
9461                         break;
9462                 }
9463
9464                 wret = walk_up_tree(trans, root, path, wc, parent_level);
9465                 if (wret < 0)
9466                         ret = wret;
9467                 if (wret != 0)
9468                         break;
9469         }
9470
9471         kfree(wc);
9472         btrfs_free_path(path);
9473         return ret;
9474 }
9475
9476 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
9477 {
9478         u64 num_devices;
9479         u64 stripped;
9480
9481         /*
9482          * if restripe for this chunk_type is on, pick the target profile
9483          * and return it; otherwise do the usual balance
9484          */
9485         stripped = get_restripe_target(root->fs_info, flags);
9486         if (stripped)
9487                 return extended_to_chunk(stripped);
9488
9489         num_devices = root->fs_info->fs_devices->rw_devices;
9490
9491         stripped = BTRFS_BLOCK_GROUP_RAID0 |
9492                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
9493                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
9494
9495         if (num_devices == 1) {
9496                 stripped |= BTRFS_BLOCK_GROUP_DUP;
9497                 stripped = flags & ~stripped;
9498
9499                 /* turn raid0 into single device chunks */
9500                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
9501                         return stripped;
9502
9503                 /* turn mirroring into duplication */
9504                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
9505                              BTRFS_BLOCK_GROUP_RAID10))
9506                         return stripped | BTRFS_BLOCK_GROUP_DUP;
9507         } else {
9508                 /* they already had raid on here, just return */
9509                 if (flags & stripped)
9510                         return flags;
9511
9512                 stripped |= BTRFS_BLOCK_GROUP_DUP;
9513                 stripped = flags & ~stripped;
9514
9515                 /* switch duplicated blocks with raid1 */
9516                 if (flags & BTRFS_BLOCK_GROUP_DUP)
9517                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
9518
9519                 /* this is drive concat, leave it alone */
9520         }
9521
9522         return flags;
9523 }
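/*
 * Worked example for update_block_group_flags() (hypothetical values):
 * with a single rw device left, RAID1|METADATA cannot keep a mirror on
 * a second disk, so it becomes DUP|METADATA, and RAID0 becomes single;
 * with two or more rw devices, DUP|METADATA is upgraded to
 * RAID1|METADATA, while profiles that already have RAID on them are
 * returned unchanged.
 */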
9524
9525 static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
9526 {
9527         struct btrfs_space_info *sinfo = cache->space_info;
9528         u64 num_bytes;
9529         u64 min_allocable_bytes;
9530         int ret = -ENOSPC;
9531
9532         /*
9533          * We need some metadata space and system metadata space for
9534          * allocating chunks in some corner cases, so keep a minimum
9535          * reserve unless the caller forces the group read-only.
9536          */
9537         if ((sinfo->flags &
9538              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
9539             !force)
9540                 min_allocable_bytes = SZ_1M;
9541         else
9542                 min_allocable_bytes = 0;
9543
9544         spin_lock(&sinfo->lock);
9545         spin_lock(&cache->lock);
9546
9547         if (cache->ro) {
9548                 cache->ro++;
9549                 ret = 0;
9550                 goto out;
9551         }
9552
9553         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
9554                     cache->bytes_super - btrfs_block_group_used(&cache->item);
9555
9556         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
9557             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
9558             min_allocable_bytes <= sinfo->total_bytes) {
9559                 sinfo->bytes_readonly += num_bytes;
9560                 cache->ro++;
9561                 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
9562                 ret = 0;
9563         }
9564 out:
9565         spin_unlock(&cache->lock);
9566         spin_unlock(&sinfo->lock);
9567         return ret;
9568 }
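/*
 * Example of the space check in inc_block_group_ro() (hypothetical
 * numbers): a 1GiB metadata block group with 200MiB used and nothing
 * reserved or pinned contributes num_bytes = 824MiB of never-allocated
 * space that becomes unusable once the group is read-only.  The switch
 * is allowed only if used + reserved + pinned + may_use + readonly +
 * 824MiB + the 1MiB min_allocable_bytes floor still fits within
 * sinfo->total_bytes.
 */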
9569
9570 int btrfs_inc_block_group_ro(struct btrfs_root *root,
9571                              struct btrfs_block_group_cache *cache)
9573 {
9574         struct btrfs_trans_handle *trans;
9575         u64 alloc_flags;
9576         int ret;
9577
9578 again:
9579         trans = btrfs_join_transaction(root);
9580         if (IS_ERR(trans))
9581                 return PTR_ERR(trans);
9582
9583         /*
9584          * we're not allowed to set block groups readonly after the dirty
9585          * block groups cache has started writing.  If it already started,
9586          * back off and let this transaction commit
9587          */
9588         mutex_lock(&root->fs_info->ro_block_group_mutex);
9589         if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
9590                 u64 transid = trans->transid;
9591
9592                 mutex_unlock(&root->fs_info->ro_block_group_mutex);
9593                 btrfs_end_transaction(trans, root);
9594
9595                 ret = btrfs_wait_for_commit(root, transid);
9596                 if (ret)
9597                         return ret;
9598                 goto again;
9599         }
9600
9601         /*
9602          * if we are changing raid levels, try to allocate a corresponding
9603          * block group with the new raid level.
9604          */
9605         alloc_flags = update_block_group_flags(root, cache->flags);
9606         if (alloc_flags != cache->flags) {
9607                 ret = do_chunk_alloc(trans, root, alloc_flags,
9608                                      CHUNK_ALLOC_FORCE);
9609                 /*
9610                  * ENOSPC is allowed here, we may have enough space
9611                  * already allocated at the new raid level to
9612                  * carry on
9613                  */
9614                 if (ret == -ENOSPC)
9615                         ret = 0;
9616                 if (ret < 0)
9617                         goto out;
9618         }
9619
9620         ret = inc_block_group_ro(cache, 0);
9621         if (!ret)
9622                 goto out;
9623         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
9624         ret = do_chunk_alloc(trans, root, alloc_flags,
9625                              CHUNK_ALLOC_FORCE);
9626         if (ret < 0)
9627                 goto out;
9628         ret = inc_block_group_ro(cache, 0);
9629 out:
9630         if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
9631                 alloc_flags = update_block_group_flags(root, cache->flags);
9632                 lock_chunks(root->fs_info->chunk_root);
9633                 check_system_chunk(trans, root, alloc_flags);
9634                 unlock_chunks(root->fs_info->chunk_root);
9635         }
9636         mutex_unlock(&root->fs_info->ro_block_group_mutex);
9637
9638         btrfs_end_transaction(trans, root);
9639         return ret;
9640 }
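/*
 * Sketch of the expected pairing (hypothetical caller): users such as
 * balance or scrub hold the group read-only only for the duration of
 * their work,
 *
 *	ret = btrfs_inc_block_group_ro(root, cache);
 *	if (!ret) {
 *		... operate on the now read-only group ...
 *		btrfs_dec_block_group_ro(root, cache);
 *	}
 *
 * and since cache->ro is a counter, nested users stack safely.
 */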
9641
9642 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
9643                             struct btrfs_root *root, u64 type)
9644 {
9645         u64 alloc_flags = get_alloc_profile(root, type);
9646         return do_chunk_alloc(trans, root, alloc_flags,
9647                               CHUNK_ALLOC_FORCE);
9648 }
9649
9650 /*
9651  * helper to account the unused space of all the readonly block groups in
9652  * the space_info. takes mirrors into account.
9653  */
9654 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
9655 {
9656         struct btrfs_block_group_cache *block_group;
9657         u64 free_bytes = 0;
9658         int factor;
9659
9660         /* It's df, we don't care if it's racy */
9661         if (list_empty(&sinfo->ro_bgs))
9662                 return 0;
9663
9664         spin_lock(&sinfo->lock);
9665         list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
9666                 spin_lock(&block_group->lock);
9667
9668                 if (!block_group->ro) {
9669                         spin_unlock(&block_group->lock);
9670                         continue;
9671                 }
9672
9673                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
9674                                           BTRFS_BLOCK_GROUP_RAID10 |
9675                                           BTRFS_BLOCK_GROUP_DUP))
9676                         factor = 2;
9677                 else
9678                         factor = 1;
9679
9680                 free_bytes += (block_group->key.offset -
9681                                btrfs_block_group_used(&block_group->item)) *
9682                                factor;
9683
9684                 spin_unlock(&block_group->lock);
9685         }
9686         spin_unlock(&sinfo->lock);
9687
9688         return free_bytes;
9689 }
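/*
 * Example of the accounting above (hypothetical numbers): a read-only
 * RAID1 block group with key.offset == 1GiB and 256MiB used contributes
 * (1GiB - 256MiB) * 2 == 1536MiB, because every logical byte in a
 * mirrored profile occupies two raw bytes on disk (factor == 2).
 */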
9690
9691 void btrfs_dec_block_group_ro(struct btrfs_root *root,
9692                               struct btrfs_block_group_cache *cache)
9693 {
9694         struct btrfs_space_info *sinfo = cache->space_info;
9695         u64 num_bytes;
9696
9697         BUG_ON(!cache->ro);
9698
9699         spin_lock(&sinfo->lock);
9700         spin_lock(&cache->lock);
9701         if (!--cache->ro) {
9702                 num_bytes = cache->key.offset - cache->reserved -
9703                             cache->pinned - cache->bytes_super -
9704                             btrfs_block_group_used(&cache->item);
9705                 sinfo->bytes_readonly -= num_bytes;
9706                 list_del_init(&cache->ro_list);
9707         }
9708         spin_unlock(&cache->lock);
9709         spin_unlock(&sinfo->lock);
9710 }
9711
9712 /*
9713  * checks to see if it's even possible to relocate this block group.
9714  *
9715  * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
9716  * ok to go ahead and try.
9717  */
9718 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
9719 {
9720         struct btrfs_block_group_cache *block_group;
9721         struct btrfs_space_info *space_info;
9722         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
9723         struct btrfs_device *device;
9724         struct btrfs_trans_handle *trans;
9725         u64 min_free;
9726         u64 dev_min = 1;
9727         u64 dev_nr = 0;
9728         u64 target;
9729         int debug;
9730         int index;
9731         int full = 0;
9732         int ret = 0;
9733
9734         debug = btrfs_test_opt(root, ENOSPC_DEBUG);
9735
9736         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
9737
9738         /* odd, couldn't find the block group, leave it alone */
9739         if (!block_group) {
9740                 if (debug)
9741                         btrfs_warn(root->fs_info,
9742                                    "can't find block group for bytenr %llu",
9743                                    bytenr);
9744                 return -1;
9745         }
9746
9747         min_free = btrfs_block_group_used(&block_group->item);
9748
9749         /* no bytes used, we're good */
9750         if (!min_free)
9751                 goto out;
9752
9753         space_info = block_group->space_info;
9754         spin_lock(&space_info->lock);
9755
9756         full = space_info->full;
9757
9758         /*
9759          * if this is the last block group we have in this space, we can't
9760          * relocate it unless we're able to allocate a new chunk below.
9761          *
9762          * Otherwise, we need to make sure we have room in the space to handle
9763          * all of the extents from this block group.  If we can, we're good
9764          */
9765         if ((space_info->total_bytes != block_group->key.offset) &&
9766             (space_info->bytes_used + space_info->bytes_reserved +
9767              space_info->bytes_pinned + space_info->bytes_readonly +
9768              min_free < space_info->total_bytes)) {
9769                 spin_unlock(&space_info->lock);
9770                 goto out;
9771         }
9772         spin_unlock(&space_info->lock);
9773
9774         /*
9775          * ok we don't have enough space, but maybe we have free space on our
9776          * devices to allocate new chunks for relocation, so loop through our
9777          * alloc devices and guess if we have enough space.  if this block
9778          * group is going to be restriped, run checks against the target
9779          * profile instead of the current one.
9780          */
9781         ret = -1;
9782
9783         /*
9784          * index:
9785          *      0: raid10
9786          *      1: raid1
9787          *      2: dup
9788          *      3: raid0
9789          *      4: single
9790          */
9791         target = get_restripe_target(root->fs_info, block_group->flags);
9792         if (target) {
9793                 index = __get_raid_index(extended_to_chunk(target));
9794         } else {
9795                 /*
9796                  * this is just a balance, so if we were marked as full
9797                  * we know there is no space for a new chunk
9798                  */
9799                 if (full) {
9800                         if (debug)
9801                                 btrfs_warn(root->fs_info,
9802                                         "no space to alloc new chunk for block group %llu",
9803                                         block_group->key.objectid);
9804                         goto out;
9805                 }
9806
9807                 index = get_block_group_index(block_group);
9808         }
9809
9810         if (index == BTRFS_RAID_RAID10) {
9811                 dev_min = 4;
9812                 /* Divide by 2 */
9813                 min_free >>= 1;
9814         } else if (index == BTRFS_RAID_RAID1) {
9815                 dev_min = 2;
9816         } else if (index == BTRFS_RAID_DUP) {
9817                 /* Multiply by 2 */
9818                 min_free <<= 1;
9819         } else if (index == BTRFS_RAID_RAID0) {
9820                 dev_min = fs_devices->rw_devices;
9821                 min_free = div64_u64(min_free, dev_min);
9822         }
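	/*
	 * Example (hypothetical numbers): relocating a RAID10 group with
	 * 2GiB used needs a 1GiB free extent (min_free >>= 1) on each of
	 * at least four devices, while a DUP group with 2GiB used needs a
	 * single device with a 4GiB free extent (min_free <<= 1).
	 */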
9823
9824         /* We need to do this so that we can look at pending chunks */
9825         trans = btrfs_join_transaction(root);
9826         if (IS_ERR(trans)) {
9827                 ret = PTR_ERR(trans);
9828                 goto out;
9829         }
9830
9831         mutex_lock(&root->fs_info->chunk_mutex);
9832         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
9833                 u64 dev_offset;
9834
9835                 /*
9836                  * check to make sure we can actually find a chunk with enough
9837                  * space to fit our block group in.
9838                  */
9839                 if (device->total_bytes > device->bytes_used + min_free &&
9840                     !device->is_tgtdev_for_dev_replace) {
9841                         ret = find_free_dev_extent(trans, device, min_free,
9842                                                    &dev_offset, NULL);
9843                         if (!ret)
9844                                 dev_nr++;
9845
9846                         if (dev_nr >= dev_min)
9847                                 break;
9848
9849                         ret = -1;
9850                 }
9851         }
9852         if (debug && ret == -1)
9853                 btrfs_warn(root->fs_info,
9854                         "no space to allocate a new chunk for block group %llu",
9855                         block_group->key.objectid);
9856         mutex_unlock(&root->fs_info->chunk_mutex);
9857         btrfs_end_transaction(trans, root);
9858 out:
9859         btrfs_put_block_group(block_group);
9860         return ret;
9861 }
9862
9863 static int find_first_block_group(struct btrfs_root *root,
9864                 struct btrfs_path *path, struct btrfs_key *key)
9865 {
9866         int ret = 0;
9867         struct btrfs_key found_key;
9868         struct extent_buffer *leaf;
9869         int slot;
9870
9871         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
9872         if (ret < 0)
9873                 goto out;
9874
9875         while (1) {
9876                 slot = path->slots[0];
9877                 leaf = path->nodes[0];
9878                 if (slot >= btrfs_header_nritems(leaf)) {
9879                         ret = btrfs_next_leaf(root, path);
9880                         if (ret == 0)
9881                                 continue;
9882                         if (ret < 0)
9883                                 goto out;
9884                         break;
9885                 }
9886                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
9887
9888                 if (found_key.objectid >= key->objectid &&
9889                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
9890                         ret = 0;
9891                         goto out;
9892                 }
9893                 path->slots[0]++;
9894         }
9895 out:
9896         return ret;
9897 }
9898
9899 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
9900 {
9901         struct btrfs_block_group_cache *block_group;
9902         u64 last = 0;
9903
9904         while (1) {
9905                 struct inode *inode;
9906
9907                 block_group = btrfs_lookup_first_block_group(info, last);
9908                 while (block_group) {
9909                         spin_lock(&block_group->lock);
9910                         if (block_group->iref)
9911                                 break;
9912                         spin_unlock(&block_group->lock);
9913                         block_group = next_block_group(info->tree_root,
9914                                                        block_group);
9915                 }
9916                 if (!block_group) {
9917                         if (last == 0)
9918                                 break;
9919                         last = 0;
9920                         continue;
9921                 }
9922
9923                 inode = block_group->inode;
9924                 block_group->iref = 0;
9925                 block_group->inode = NULL;
9926                 spin_unlock(&block_group->lock);
9927                 iput(inode);
9928                 last = block_group->key.objectid + block_group->key.offset;
9929                 btrfs_put_block_group(block_group);
9930         }
9931 }
9932
9933 int btrfs_free_block_groups(struct btrfs_fs_info *info)
9934 {
9935         struct btrfs_block_group_cache *block_group;
9936         struct btrfs_space_info *space_info;
9937         struct btrfs_caching_control *caching_ctl;
9938         struct rb_node *n;
9939
9940         down_write(&info->commit_root_sem);
9941         while (!list_empty(&info->caching_block_groups)) {
9942                 caching_ctl = list_entry(info->caching_block_groups.next,
9943                                          struct btrfs_caching_control, list);
9944                 list_del(&caching_ctl->list);
9945                 put_caching_control(caching_ctl);
9946         }
9947         up_write(&info->commit_root_sem);
9948
9949         spin_lock(&info->unused_bgs_lock);
9950         while (!list_empty(&info->unused_bgs)) {
9951                 block_group = list_first_entry(&info->unused_bgs,
9952                                                struct btrfs_block_group_cache,
9953                                                bg_list);
9954                 list_del_init(&block_group->bg_list);
9955                 btrfs_put_block_group(block_group);
9956         }
9957         spin_unlock(&info->unused_bgs_lock);
9958
9959         spin_lock(&info->block_group_cache_lock);
9960         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
9961                 block_group = rb_entry(n, struct btrfs_block_group_cache,
9962                                        cache_node);
9963                 rb_erase(&block_group->cache_node,
9964                          &info->block_group_cache_tree);
9965                 RB_CLEAR_NODE(&block_group->cache_node);
9966                 spin_unlock(&info->block_group_cache_lock);
9967
9968                 down_write(&block_group->space_info->groups_sem);
9969                 list_del(&block_group->list);
9970                 up_write(&block_group->space_info->groups_sem);
9971
9972                 if (block_group->cached == BTRFS_CACHE_STARTED)
9973                         wait_block_group_cache_done(block_group);
9974
9975                 /*
9976                  * We haven't cached this block group, which means we could
9977                  * possibly have excluded extents on this block group.
9978                  */
9979                 if (block_group->cached == BTRFS_CACHE_NO ||
9980                     block_group->cached == BTRFS_CACHE_ERROR)
9981                         free_excluded_extents(info->extent_root, block_group);
9982
9983                 btrfs_remove_free_space_cache(block_group);
9984                 btrfs_put_block_group(block_group);
9985
9986                 spin_lock(&info->block_group_cache_lock);
9987         }
9988         spin_unlock(&info->block_group_cache_lock);
9989
9990         /* now that all the block groups are freed, go through and
9991          * free all the space_info structs.  This is only called during
9992          * the final stages of unmount, and so we know nobody is
9993          * using them.  We call synchronize_rcu() once before we start,
9994          * just to be on the safe side.
9995          */
9996         synchronize_rcu();
9997
9998         release_global_block_rsv(info);
9999
10000         while (!list_empty(&info->space_info)) {
10001                 int i;
10002
10003                 space_info = list_entry(info->space_info.next,
10004                                         struct btrfs_space_info,
10005                                         list);
10006
10007                 /*
10008                  * Do not hide this behind enospc_debug; this is actually
10009                  * important and indicates a real bug if it happens.
10010                  */
10011                 if (WARN_ON(space_info->bytes_pinned > 0 ||
10012                             space_info->bytes_reserved > 0 ||
10013                             space_info->bytes_may_use > 0))
10014                         dump_space_info(space_info, 0, 0);
10015                 list_del(&space_info->list);
10016                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
10017                         struct kobject *kobj;
10018                         kobj = space_info->block_group_kobjs[i];
10019                         space_info->block_group_kobjs[i] = NULL;
10020                         if (kobj) {
10021                                 kobject_del(kobj);
10022                                 kobject_put(kobj);
10023                         }
10024                 }
10025                 kobject_del(&space_info->kobj);
10026                 kobject_put(&space_info->kobj);
10027         }
10028         return 0;
10029 }
10030
10031 static void __link_block_group(struct btrfs_space_info *space_info,
10032                                struct btrfs_block_group_cache *cache)
10033 {
10034         int index = get_block_group_index(cache);
10035         bool first = false;
10036
10037         down_write(&space_info->groups_sem);
10038         if (list_empty(&space_info->block_groups[index]))
10039                 first = true;
10040         list_add_tail(&cache->list, &space_info->block_groups[index]);
10041         up_write(&space_info->groups_sem);
10042
10043         if (first) {
10044                 struct raid_kobject *rkobj;
10045                 int ret;
10046
10047                 rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
10048                 if (!rkobj)
10049                         goto out_err;
10050                 rkobj->raid_type = index;
10051                 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
10052                 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
10053                                   "%s", get_raid_name(index));
10054                 if (ret) {
10055                         kobject_put(&rkobj->kobj);
10056                         goto out_err;
10057                 }
10058                 space_info->block_group_kobjs[index] = &rkobj->kobj;
10059         }
10060
10061         return;
10062 out_err:
10063         pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
10064 }
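/*
 * The kobject registered in __link_block_group() is what backs the
 * per-profile allocation directories in sysfs; for example (path
 * assumed from the sysfs layout, not verified here), the first RAID1
 * data block group shows up as
 * /sys/fs/btrfs/<fsid>/allocation/data/raid1.
 */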
10065
10066 static struct btrfs_block_group_cache *
10067 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
10068 {
10069         struct btrfs_block_group_cache *cache;
10070
10071         cache = kzalloc(sizeof(*cache), GFP_NOFS);
10072         if (!cache)
10073                 return NULL;
10074
10075         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
10076                                         GFP_NOFS);
10077         if (!cache->free_space_ctl) {
10078                 kfree(cache);
10079                 return NULL;
10080         }
10081
10082         cache->key.objectid = start;
10083         cache->key.offset = size;
10084         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
10085
10086         cache->sectorsize = root->sectorsize;
10087         cache->fs_info = root->fs_info;
10088         cache->full_stripe_len = btrfs_full_stripe_len(root,
10089                                                &root->fs_info->mapping_tree,
10090                                                start);
10091         set_free_space_tree_thresholds(cache);
10092
10093         atomic_set(&cache->count, 1);
10094         spin_lock_init(&cache->lock);
10095         init_rwsem(&cache->data_rwsem);
10096         INIT_LIST_HEAD(&cache->list);
10097         INIT_LIST_HEAD(&cache->cluster_list);
10098         INIT_LIST_HEAD(&cache->bg_list);
10099         INIT_LIST_HEAD(&cache->ro_list);
10100         INIT_LIST_HEAD(&cache->dirty_list);
10101         INIT_LIST_HEAD(&cache->io_list);
10102         btrfs_init_free_space_ctl(cache);
10103         atomic_set(&cache->trimming, 0);
10104         mutex_init(&cache->free_space_lock);
10105
10106         return cache;
10107 }
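/*
 * Block group items are keyed as (start, BLOCK_GROUP_ITEM, size), so a
 * cache built by btrfs_create_block_group_cache(root, start, size) with,
 * say, start == 16MiB and size == 8MiB (hypothetical numbers) describes
 * the logical byte range [16MiB, 24MiB) and is found again by searching
 * for objectid == 16MiB with offset == 8MiB.
 */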
10108
10109 int btrfs_read_block_groups(struct btrfs_root *root)
10110 {
10111         struct btrfs_path *path;
10112         int ret;
10113         struct btrfs_block_group_cache *cache;
10114         struct btrfs_fs_info *info = root->fs_info;
10115         struct btrfs_space_info *space_info;
10116         struct btrfs_key key;
10117         struct btrfs_key found_key;
10118         struct extent_buffer *leaf;
10119         int need_clear = 0;
10120         u64 cache_gen;
10121
10122         root = info->extent_root;
10123         key.objectid = 0;
10124         key.offset = 0;
10125         key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
10126         path = btrfs_alloc_path();
10127         if (!path)
10128                 return -ENOMEM;
10129         path->reada = READA_FORWARD;
10130
10131         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
10132         if (btrfs_test_opt(root, SPACE_CACHE) &&
10133             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
10134                 need_clear = 1;
10135         if (btrfs_test_opt(root, CLEAR_CACHE))
10136                 need_clear = 1;
10137
10138         while (1) {
10139                 ret = find_first_block_group(root, path, &key);
10140                 if (ret > 0)
10141                         break;
10142                 if (ret != 0)
10143                         goto error;
10144
10145                 leaf = path->nodes[0];
10146                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
10147
10148                 cache = btrfs_create_block_group_cache(root, found_key.objectid,
10149                                                        found_key.offset);
10150                 if (!cache) {
10151                         ret = -ENOMEM;
10152                         goto error;
10153                 }
10154
10155                 if (need_clear) {
10156                         /*
10157                          * When we mount with old space cache, we need to
10158                          * set BTRFS_DC_CLEAR and set the dirty flag.
10159                          *
10160                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
10161                          *    truncate the old free space cache inode and
10162                          *    set up a new one.
10163                          * b) Setting 'dirty flag' makes sure that we flush
10164                          *    the new space cache info onto disk.
10165                          */
10166                         if (btrfs_test_opt(root, SPACE_CACHE))
10167                                 cache->disk_cache_state = BTRFS_DC_CLEAR;
10168                 }
10169
10170                 read_extent_buffer(leaf, &cache->item,
10171                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
10172                                    sizeof(cache->item));
10173                 cache->flags = btrfs_block_group_flags(&cache->item);
10174
10175                 key.objectid = found_key.objectid + found_key.offset;
10176                 btrfs_release_path(path);
10177
10178                 /*
10179                  * We need to exclude the super stripes now so that the space
10180                  * info has super bytes accounted for; otherwise we'll think
10181                  * we have more space than we actually do.
10182                  */
10183                 ret = exclude_super_stripes(root, cache);
10184                 if (ret) {
10185                         /*
10186                          * We may have excluded something, so call this just in
10187                          * case.
10188                          */
10189                         free_excluded_extents(root, cache);
10190                         btrfs_put_block_group(cache);
10191                         goto error;
10192                 }
10193
10194                 /*
10195                  * check for two cases: either we are full, and therefore
10196                  * don't need to bother with the caching work since we won't
10197                  * find any space, or we are empty, and we can just add all
10198                  * the space in and be done with it.  This saves us a lot of
10199                  * time, particularly in the full case.
10200                  */
10201                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
10202                         cache->last_byte_to_unpin = (u64)-1;
10203                         cache->cached = BTRFS_CACHE_FINISHED;
10204                         free_excluded_extents(root, cache);
10205                 } else if (btrfs_block_group_used(&cache->item) == 0) {
10206                         cache->last_byte_to_unpin = (u64)-1;
10207                         cache->cached = BTRFS_CACHE_FINISHED;
10208                         add_new_free_space(cache, root->fs_info,
10209                                            found_key.objectid,
10210                                            found_key.objectid +
10211                                            found_key.offset);
10212                         free_excluded_extents(root, cache);
10213                 }
10214
10215                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
10216                 if (ret) {
10217                         btrfs_remove_free_space_cache(cache);
10218                         btrfs_put_block_group(cache);
10219                         goto error;
10220                 }
10221
10222                 trace_btrfs_add_block_group(root->fs_info, cache, 0);
10223                 ret = update_space_info(info, cache->flags, found_key.offset,
10224                                         btrfs_block_group_used(&cache->item),
10225                                         cache->bytes_super, &space_info);
10226                 if (ret) {
10227                         btrfs_remove_free_space_cache(cache);
10228                         spin_lock(&info->block_group_cache_lock);
10229                         rb_erase(&cache->cache_node,
10230                                  &info->block_group_cache_tree);
10231                         RB_CLEAR_NODE(&cache->cache_node);
10232                         spin_unlock(&info->block_group_cache_lock);
10233                         btrfs_put_block_group(cache);
10234                         goto error;
10235                 }
10236
10237                 cache->space_info = space_info;
10238
10239                 __link_block_group(space_info, cache);
10240
10241                 set_avail_alloc_bits(root->fs_info, cache->flags);
10242                 if (btrfs_chunk_readonly(root, cache->key.objectid)) {
10243                         inc_block_group_ro(cache, 1);
10244                 } else if (btrfs_block_group_used(&cache->item) == 0) {
10245                         spin_lock(&info->unused_bgs_lock);
10246                         /* Should always be true but just in case. */
10247                         if (list_empty(&cache->bg_list)) {
10248                                 btrfs_get_block_group(cache);
10249                                 list_add_tail(&cache->bg_list,
10250                                               &info->unused_bgs);
10251                         }
10252                         spin_unlock(&info->unused_bgs_lock);
10253                 }
10254         }
10255
10256         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
10257                 if (!(get_alloc_profile(root, space_info->flags) &
10258                       (BTRFS_BLOCK_GROUP_RAID10 |
10259                        BTRFS_BLOCK_GROUP_RAID1 |
10260                        BTRFS_BLOCK_GROUP_RAID5 |
10261                        BTRFS_BLOCK_GROUP_RAID6 |
10262                        BTRFS_BLOCK_GROUP_DUP)))
10263                         continue;
10264                 /*
10265                  * avoid allocating from un-mirrored block group if there are
10266                  * mirrored block groups.
10267                  */
10268                 list_for_each_entry(cache,
10269                                 &space_info->block_groups[BTRFS_RAID_RAID0],
10270                                 list)
10271                         inc_block_group_ro(cache, 1);
10272                 list_for_each_entry(cache,
10273                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
10274                                 list)
10275                         inc_block_group_ro(cache, 1);
10276         }
10277
10278         init_global_block_rsv(info);
10279         ret = 0;
10280 error:
10281         btrfs_free_path(path);
10282         return ret;
10283 }
10284
10285 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
10286                                        struct btrfs_root *root)
10287 {
10288         struct btrfs_block_group_cache *block_group, *tmp;
10289         struct btrfs_root *extent_root = root->fs_info->extent_root;
10290         struct btrfs_block_group_item item;
10291         struct btrfs_key key;
10292         int ret = 0;
10293         bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
10294
10295         trans->can_flush_pending_bgs = false;
10296         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
10297                 if (ret)
10298                         goto next;
10299
10300                 spin_lock(&block_group->lock);
10301                 memcpy(&item, &block_group->item, sizeof(item));
10302                 memcpy(&key, &block_group->key, sizeof(key));
10303                 spin_unlock(&block_group->lock);
10304
10305                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
10306                                         sizeof(item));
10307                 if (ret)
10308                         btrfs_abort_transaction(trans, extent_root, ret);
10309                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
10310                                                key.objectid, key.offset);
10311                 if (ret)
10312                         btrfs_abort_transaction(trans, extent_root, ret);
10313                 add_block_group_free_space(trans, root->fs_info, block_group);
10314                 /* already aborted the transaction if it failed. */
10315 next:
10316                 list_del_init(&block_group->bg_list);
10317         }
10318         trans->can_flush_pending_bgs = can_flush_pending_bgs;
10319 }
10320
10321 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
10322                            struct btrfs_root *root, u64 bytes_used,
10323                            u64 type, u64 chunk_objectid, u64 chunk_offset,
10324                            u64 size)
10325 {
10326         int ret;
10327         struct btrfs_root *extent_root;
10328         struct btrfs_block_group_cache *cache;
10329         extent_root = root->fs_info->extent_root;
10330
10331         btrfs_set_log_full_commit(root->fs_info, trans);
10332
10333         cache = btrfs_create_block_group_cache(root, chunk_offset, size);
10334         if (!cache)
10335                 return -ENOMEM;
10336
10337         btrfs_set_block_group_used(&cache->item, bytes_used);
10338         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
10339         btrfs_set_block_group_flags(&cache->item, type);
10340
10341         cache->flags = type;
10342         cache->last_byte_to_unpin = (u64)-1;
10343         cache->cached = BTRFS_CACHE_FINISHED;
10344         cache->needs_free_space = 1;
10345         ret = exclude_super_stripes(root, cache);
10346         if (ret) {
10347                 /*
10348                  * We may have excluded something, so call this just in
10349                  * case.
10350                  */
10351                 free_excluded_extents(root, cache);
10352                 btrfs_put_block_group(cache);
10353                 return ret;
10354         }
10355
10356         add_new_free_space(cache, root->fs_info, chunk_offset,
10357                            chunk_offset + size);
10358
10359         free_excluded_extents(root, cache);
10360
10361 #ifdef CONFIG_BTRFS_DEBUG
10362         if (btrfs_should_fragment_free_space(root, cache)) {
10363                 u64 new_bytes_used = size - bytes_used;
10364
10365                 bytes_used += new_bytes_used >> 1;
10366                 fragment_free_space(root, cache);
10367         }
10368 #endif
10369         /*
10370          * Call to ensure the corresponding space_info object is created and
10371          * assigned to our block group, but don't update its counters just yet.
10372          * We want our bg to be added to the rbtree with its ->space_info set.
10373          */
10374         ret = update_space_info(root->fs_info, cache->flags, 0, 0, 0,
10375                                 &cache->space_info);
10376         if (ret) {
10377                 btrfs_remove_free_space_cache(cache);
10378                 btrfs_put_block_group(cache);
10379                 return ret;
10380         }
10381
10382         ret = btrfs_add_block_group_cache(root->fs_info, cache);
10383         if (ret) {
10384                 btrfs_remove_free_space_cache(cache);
10385                 btrfs_put_block_group(cache);
10386                 return ret;
10387         }
10388
10389         /*
10390          * Now that our block group has its ->space_info set and is inserted in
10391          * the rbtree, update the space info's counters.
10392          */
10393         trace_btrfs_add_block_group(root->fs_info, cache, 1);
10394         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
10395                                 cache->bytes_super, &cache->space_info);
10396         if (ret) {
10397                 btrfs_remove_free_space_cache(cache);
10398                 spin_lock(&root->fs_info->block_group_cache_lock);
10399                 rb_erase(&cache->cache_node,
10400                          &root->fs_info->block_group_cache_tree);
10401                 RB_CLEAR_NODE(&cache->cache_node);
10402                 spin_unlock(&root->fs_info->block_group_cache_lock);
10403                 btrfs_put_block_group(cache);
10404                 return ret;
10405         }
10406         update_global_block_rsv(root->fs_info);
10407
10408         __link_block_group(cache->space_info, cache);
10409
10410         list_add_tail(&cache->bg_list, &trans->new_bgs);
10411
10412         set_avail_alloc_bits(extent_root->fs_info, type);
10413         return 0;
10414 }
10415
10416 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
10417 {
10418         u64 extra_flags = chunk_to_extended(flags) &
10419                                 BTRFS_EXTENDED_PROFILE_MASK;
10420
10421         write_seqlock(&fs_info->profiles_lock);
10422         if (flags & BTRFS_BLOCK_GROUP_DATA)
10423                 fs_info->avail_data_alloc_bits &= ~extra_flags;
10424         if (flags & BTRFS_BLOCK_GROUP_METADATA)
10425                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
10426         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
10427                 fs_info->avail_system_alloc_bits &= ~extra_flags;
10428         write_sequnlock(&fs_info->profiles_lock);
10429 }
10430
10431 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
10432                              struct btrfs_root *root, u64 group_start,
10433                              struct extent_map *em)
10434 {
10435         struct btrfs_path *path;
10436         struct btrfs_block_group_cache *block_group;
10437         struct btrfs_free_cluster *cluster;
10438         struct btrfs_root *tree_root = root->fs_info->tree_root;
10439         struct btrfs_key key;
10440         struct inode *inode;
10441         struct kobject *kobj = NULL;
10442         int ret;
10443         int index;
10444         int factor;
10445         struct btrfs_caching_control *caching_ctl = NULL;
10446         bool remove_em;
10447
10448         root = root->fs_info->extent_root;
10449
10450         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
10451         BUG_ON(!block_group);
10452         BUG_ON(!block_group->ro);
10453
10454         /*
10455          * Free the reserved super bytes from this block group before
10456          * removing it.
10457          */
10458         free_excluded_extents(root, block_group);
10459
10460         memcpy(&key, &block_group->key, sizeof(key));
10461         index = get_block_group_index(block_group);
10462         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
10463                                   BTRFS_BLOCK_GROUP_RAID1 |
10464                                   BTRFS_BLOCK_GROUP_RAID10))
10465                 factor = 2;
10466         else
10467                 factor = 1;
10468
10469         /* make sure this block group isn't part of an allocation cluster */
10470         cluster = &root->fs_info->data_alloc_cluster;
10471         spin_lock(&cluster->refill_lock);
10472         btrfs_return_cluster_to_free_space(block_group, cluster);
10473         spin_unlock(&cluster->refill_lock);
10474
10475         /*
10476          * make sure this block group isn't part of a metadata
10477          * allocation cluster
10478          */
10479         cluster = &root->fs_info->meta_alloc_cluster;
10480         spin_lock(&cluster->refill_lock);
10481         btrfs_return_cluster_to_free_space(block_group, cluster);
10482         spin_unlock(&cluster->refill_lock);
10483
10484         path = btrfs_alloc_path();
10485         if (!path) {
10486                 ret = -ENOMEM;
10487                 goto out;
10488         }
10489
10490         /*
10491          * get the inode first so any iput calls done for the io_list
10492          * aren't the final iput (no unlinks allowed now)
10493          */
10494         inode = lookup_free_space_inode(tree_root, block_group, path);
10495
10496         mutex_lock(&trans->transaction->cache_write_mutex);
10497         /*
10498          * make sure our free space cache IO is done before removing the
10499          * free space inode
10500          */
10501         spin_lock(&trans->transaction->dirty_bgs_lock);
10502         if (!list_empty(&block_group->io_list)) {
10503                 list_del_init(&block_group->io_list);
10504
10505                 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
10506
10507                 spin_unlock(&trans->transaction->dirty_bgs_lock);
10508                 btrfs_wait_cache_io(root, trans, block_group,
10509                                     &block_group->io_ctl, path,
10510                                     block_group->key.objectid);
10511                 btrfs_put_block_group(block_group);
10512                 spin_lock(&trans->transaction->dirty_bgs_lock);
10513         }
10514
10515         if (!list_empty(&block_group->dirty_list)) {
10516                 list_del_init(&block_group->dirty_list);
10517                 btrfs_put_block_group(block_group);
10518         }
10519         spin_unlock(&trans->transaction->dirty_bgs_lock);
10520         mutex_unlock(&trans->transaction->cache_write_mutex);
10521
10522         if (!IS_ERR(inode)) {
10523                 ret = btrfs_orphan_add(trans, inode);
10524                 if (ret) {
10525                         btrfs_add_delayed_iput(inode);
10526                         goto out;
10527                 }
10528                 clear_nlink(inode);
10529                 /* One for the block group's ref */
10530                 spin_lock(&block_group->lock);
10531                 if (block_group->iref) {
10532                         block_group->iref = 0;
10533                         block_group->inode = NULL;
10534                         spin_unlock(&block_group->lock);
10535                         iput(inode);
10536                 } else {
10537                         spin_unlock(&block_group->lock);
10538                 }
10539                 /* One for our lookup ref */
10540                 btrfs_add_delayed_iput(inode);
10541         }
10542
10543         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
10544         key.offset = block_group->key.objectid;
10545         key.type = 0;
10546
10547         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
10548         if (ret < 0)
10549                 goto out;
10550         if (ret > 0)
10551                 btrfs_release_path(path);
10552         if (ret == 0) {
10553                 ret = btrfs_del_item(trans, tree_root, path);
10554                 if (ret)
10555                         goto out;
10556                 btrfs_release_path(path);
10557         }
10558
10559         spin_lock(&root->fs_info->block_group_cache_lock);
10560         rb_erase(&block_group->cache_node,
10561                  &root->fs_info->block_group_cache_tree);
10562         RB_CLEAR_NODE(&block_group->cache_node);
10563
10564         if (root->fs_info->first_logical_byte == block_group->key.objectid)
10565                 root->fs_info->first_logical_byte = (u64)-1;
10566         spin_unlock(&root->fs_info->block_group_cache_lock);
10567
10568         down_write(&block_group->space_info->groups_sem);
10569         /*
10570          * we must use list_del_init so people can check to see if they
10571          * are still on the list after taking the semaphore
10572          */
10573         list_del_init(&block_group->list);
10574         if (list_empty(&block_group->space_info->block_groups[index])) {
10575                 kobj = block_group->space_info->block_group_kobjs[index];
10576                 block_group->space_info->block_group_kobjs[index] = NULL;
10577                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
10578         }
10579         up_write(&block_group->space_info->groups_sem);
10580         if (kobj) {
10581                 kobject_del(kobj);
10582                 kobject_put(kobj);
10583         }
10584
10585         if (block_group->has_caching_ctl)
10586                 caching_ctl = get_caching_control(block_group);
10587         if (block_group->cached == BTRFS_CACHE_STARTED)
10588                 wait_block_group_cache_done(block_group);
10589         if (block_group->has_caching_ctl) {
10590                 down_write(&root->fs_info->commit_root_sem);
10591                 if (!caching_ctl) {
10592                         struct btrfs_caching_control *ctl;
10593
10594                         list_for_each_entry(ctl,
10595                                     &root->fs_info->caching_block_groups, list)
10596                                 if (ctl->block_group == block_group) {
10597                                         caching_ctl = ctl;
10598                                         atomic_inc(&caching_ctl->count);
10599                                         break;
10600                                 }
10601                 }
10602                 if (caching_ctl)
10603                         list_del_init(&caching_ctl->list);
10604                 up_write(&root->fs_info->commit_root_sem);
10605                 if (caching_ctl) {
10606                         /* Once for the caching bgs list and once for us. */
10607                         put_caching_control(caching_ctl);
10608                         put_caching_control(caching_ctl);
10609                 }
10610         }
10611
10612         spin_lock(&trans->transaction->dirty_bgs_lock);
10613         WARN_ON(!list_empty(&block_group->dirty_list));
10614         WARN_ON(!list_empty(&block_group->io_list));
10619         spin_unlock(&trans->transaction->dirty_bgs_lock);
10620         btrfs_remove_free_space_cache(block_group);
10621
10622         spin_lock(&block_group->space_info->lock);
10623         list_del_init(&block_group->ro_list);
10624
10625         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
10626                 WARN_ON(block_group->space_info->total_bytes
10627                         < block_group->key.offset);
10628                 WARN_ON(block_group->space_info->bytes_readonly
10629                         < block_group->key.offset);
10630                 WARN_ON(block_group->space_info->disk_total
10631                         < block_group->key.offset * factor);
10632         }
10633         block_group->space_info->total_bytes -= block_group->key.offset;
10634         block_group->space_info->bytes_readonly -= block_group->key.offset;
10635         block_group->space_info->disk_total -= block_group->key.offset * factor;
10636
10637         spin_unlock(&block_group->space_info->lock);
10638
10639         memcpy(&key, &block_group->key, sizeof(key));
10640
10641         lock_chunks(root);
10642         if (!list_empty(&em->list)) {
10643                 /* We're in the transaction->pending_chunks list. */
10644                 free_extent_map(em);
10645         }
10646         spin_lock(&block_group->lock);
10647         block_group->removed = 1;
10648         /*
10649          * At this point trimming can't start on this block group, because we
10650          * removed the block group from fs_info->block_group_cache_tree, so no
10651          * one can find it anymore; and even if someone already got this block
10652          * group before we removed it from the rbtree, they have already
10653          * incremented block_group->trimming - if they didn't, they won't find
10654          * any free space entries because we already removed them all when we
10655          * called btrfs_remove_free_space_cache().
10656          *
10657          * And we must not remove the extent map from the fs_info->mapping_tree
10658          * to prevent the same logical address range and physical device space
10659          * ranges from being reused for a new block group. This is because our
10660          * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
10661          * completely transactionless, so while it is trimming a range the
10662          * currently running transaction might finish and a new one start,
10663          * allowing for new block groups to be created that can reuse the same
10664          * physical device locations unless we take this special care.
10665          *
10666          * There may also be an implicit trim operation if the file system
10667          * is mounted with -odiscard. The same protections must remain
10668          * in place until the extents have been discarded completely when
10669          * the transaction commit has completed.
10670          */
10671         remove_em = (atomic_read(&block_group->trimming) == 0);
10672         /*
10673          * Make sure a trimmer task always sees the em in the pinned_chunks list
10674          * if it sees block_group->removed == 1 (needs to lock block_group->lock
10675          * before checking block_group->removed).
10676          */
10677         if (!remove_em) {
10678                 /*
10679                  * Our em might be in trans->transaction->pending_chunks which
10680                  * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
10681                  * and so is the fs_info->pinned_chunks list.
10682                  *
10683                  * So at this point we must be holding the chunk_mutex to avoid
10684                  * any races with chunk allocation (more specifically at
10685                  * volumes.c:contains_pending_extent()), to ensure it always
10686                  * sees the em, either in the pending_chunks list or in the
10687                  * pinned_chunks list.
10688                  */
10689                 list_move_tail(&em->list, &root->fs_info->pinned_chunks);
10690         }
10691         spin_unlock(&block_group->lock);
10692
10693         if (remove_em) {
10694                 struct extent_map_tree *em_tree;
10695
10696                 em_tree = &root->fs_info->mapping_tree.map_tree;
10697                 write_lock(&em_tree->lock);
10698                 /*
10699                  * The em might be in the pending_chunks list, so make sure the
10700                  * chunk mutex is locked, since remove_extent_mapping() will
10701                  * delete us from that list.
10702                  */
10703                 remove_extent_mapping(em_tree, em);
10704                 write_unlock(&em_tree->lock);
10705                 /* once for the tree */
10706                 free_extent_map(em);
10707         }
10708
10709         unlock_chunks(root);
10710
10711         ret = remove_block_group_free_space(trans, root->fs_info, block_group);
10712         if (ret)
10713                 goto out;
10714
10715         btrfs_put_block_group(block_group);
10716         btrfs_put_block_group(block_group);
10717
10718         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
10719         if (ret > 0)
10720                 ret = -EIO;
10721         if (ret < 0)
10722                 goto out;
10723
10724         ret = btrfs_del_item(trans, root, path);
10725 out:
10726         btrfs_free_path(path);
10727         return ret;
10728 }
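
/*
 * Illustrative helper (a sketch, not part of the original file): the
 * 'factor' used by btrfs_remove_block_group() above reflects that DUP,
 * RAID1 and RAID10 block groups store two copies of every byte, so e.g.
 * removing a 1GiB RAID1 block group releases 2GiB of
 * space_info->disk_total while total_bytes drops by only 1GiB.
 */
static int __maybe_unused example_block_group_factor(u64 flags)
{
        if (flags & (BTRFS_BLOCK_GROUP_DUP |
                     BTRFS_BLOCK_GROUP_RAID1 |
                     BTRFS_BLOCK_GROUP_RAID10))
                return 2;       /* two physical bytes per logical byte */
        return 1;               /* SINGLE/RAID0; RAID5/6 parity is not
                                   accounted through this factor */
}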
10729
10730 struct btrfs_trans_handle *
10731 btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info,
10732                                      const u64 chunk_offset)
10733 {
10734         struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
10735         struct extent_map *em;
10736         struct map_lookup *map;
10737         unsigned int num_items;
10738
10739         read_lock(&em_tree->lock);
10740         em = lookup_extent_mapping(em_tree, chunk_offset, 1);
10741         read_unlock(&em_tree->lock);
10742         ASSERT(em && em->start == chunk_offset);
10743
10744         /*
10745          * We need to reserve 3 + N units from the metadata space info in order
10746          * to remove a block group (done at btrfs_remove_chunk() and at
10747          * btrfs_remove_block_group()), which are used for:
10748          *
10749          * 1 unit for adding the free space inode's orphan (located in the tree
10750          * of tree roots).
10751          * 1 unit for deleting the block group item (located in the extent
10752          * tree).
10753          * 1 unit for deleting the free space item (located in tree of tree
10754          * roots).
10755          * N units for deleting N device extent items corresponding to each
10756          * stripe (located in the device tree).
10757          *
10758          * In order to remove a block group we also need to reserve units in the
10759          * system space info in order to update the chunk tree (update one or
10760          * more device items and remove one chunk item), but this is done at
10761          * btrfs_remove_chunk() through a call to check_system_chunk().
10762          */
10763         map = em->map_lookup;
10764         num_items = 3 + map->num_stripes;
10765         free_extent_map(em);
10766
10767         return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
10768                                                            num_items, 1);
10769 }
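
/*
 * Worked example (illustrative, not part of the original file): for a
 * RAID10 chunk striped across four devices, map->num_stripes == 4 and
 *
 *      num_items = 3 + 4 = 7
 *
 * i.e. one unit each for the orphan item, the block group item and the
 * free space item, plus one per device extent item.  The system space
 * units for the chunk tree update are reserved separately through
 * check_system_chunk(), as noted in the comment above.
 */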
10770
10771 /*
10772  * Process the unused_bgs list and remove any that don't have any allocated
10773  * space inside of them.
10774  */
10775 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
10776 {
10777         struct btrfs_block_group_cache *block_group;
10778         struct btrfs_space_info *space_info;
10779         struct btrfs_root *root = fs_info->extent_root;
10780         struct btrfs_trans_handle *trans;
10781         int ret = 0;
10782
10783         if (!fs_info->open)
10784                 return;
10785
10786         spin_lock(&fs_info->unused_bgs_lock);
10787         while (!list_empty(&fs_info->unused_bgs)) {
10788                 u64 start, end;
10789                 int trimming;
10790
10791                 block_group = list_first_entry(&fs_info->unused_bgs,
10792                                                struct btrfs_block_group_cache,
10793                                                bg_list);
10794                 list_del_init(&block_group->bg_list);
10795
10796                 space_info = block_group->space_info;
10797
10798                 if (ret || btrfs_mixed_space_info(space_info)) {
10799                         btrfs_put_block_group(block_group);
10800                         continue;
10801                 }
10802                 spin_unlock(&fs_info->unused_bgs_lock);
10803
10804                 mutex_lock(&fs_info->delete_unused_bgs_mutex);
10805
10806                 /* Don't want to race with allocators so take the groups_sem */
10807                 down_write(&space_info->groups_sem);
10808                 spin_lock(&block_group->lock);
10809                 if (block_group->reserved ||
10810                     btrfs_block_group_used(&block_group->item) ||
10811                     block_group->ro ||
10812                     list_is_singular(&block_group->list)) {
10813                         /*
10814                          * We want to bail if we made new allocations or have
10815                          * outstanding allocations in this block group.  We do
10816                          * the ro check in case balance is currently acting on
10817                          * this block group.
10818                          */
10819                         spin_unlock(&block_group->lock);
10820                         up_write(&space_info->groups_sem);
10821                         goto next;
10822                 }
10823                 spin_unlock(&block_group->lock);
10824
10825                 /* We don't want to force the issue, only flip if it's ok. */
10826                 ret = inc_block_group_ro(block_group, 0);
10827                 up_write(&space_info->groups_sem);
10828                 if (ret < 0) {
10829                         ret = 0;
10830                         goto next;
10831                 }
10832
10833                 /*
10834                  * Want to do this before we do anything else so we can recover
10835                  * properly if we fail to join the transaction.
10836                  */
10837                 trans = btrfs_start_trans_remove_block_group(fs_info,
10838                                                      block_group->key.objectid);
10839                 if (IS_ERR(trans)) {
10840                         btrfs_dec_block_group_ro(root, block_group);
10841                         ret = PTR_ERR(trans);
10842                         goto next;
10843                 }
10844
10845                 /*
10846                  * We could have pending pinned extents for this block group,
10847                  * just delete them, we don't care about them anymore.
10848                  */
10849                 start = block_group->key.objectid;
10850                 end = start + block_group->key.offset - 1;
10851                 /*
10852                  * Hold the unused_bg_unpin_mutex lock to avoid racing with
10853                  * btrfs_finish_extent_commit(). If we are at transaction N,
10854                  * another task might be running finish_extent_commit() for the
10855                  * previous transaction N - 1, and have seen a range belonging
10856                  * to the block group in freed_extents[] before we were able to
10857                  * clear the whole block group range from freed_extents[]. This
10858          * means that task can look up the block group after we
10859                  * unpinned it from freed_extents[] and removed it, leading to
10860                  * a BUG_ON() at btrfs_unpin_extent_range().
10861                  */
10862                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
10863                 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
10864                                   EXTENT_DIRTY);
10865                 if (ret) {
10866                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10867                         btrfs_dec_block_group_ro(root, block_group);
10868                         goto end_trans;
10869                 }
10870                 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
10871                                   EXTENT_DIRTY);
10872                 if (ret) {
10873                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10874                         btrfs_dec_block_group_ro(root, block_group);
10875                         goto end_trans;
10876                 }
10877                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10878
10879                 /* Reset pinned so btrfs_put_block_group doesn't complain */
10880                 spin_lock(&space_info->lock);
10881                 spin_lock(&block_group->lock);
10882
10883                 space_info->bytes_pinned -= block_group->pinned;
10884                 space_info->bytes_readonly += block_group->pinned;
10885                 percpu_counter_add(&space_info->total_bytes_pinned,
10886                                    -block_group->pinned);
10887                 block_group->pinned = 0;
10888
10889                 spin_unlock(&block_group->lock);
10890                 spin_unlock(&space_info->lock);
10891
10892                 /* DISCARD can flip during remount */
10893                 trimming = btrfs_test_opt(root, DISCARD);
10894
10895                 /* Implicit trim during transaction commit. */
10896                 if (trimming)
10897                         btrfs_get_block_group_trimming(block_group);
10898
10899                 /*
10900                  * btrfs_remove_chunk() will abort the transaction if things go
10901                  * horribly wrong.
10902                  */
10903                 ret = btrfs_remove_chunk(trans, root,
10904                                          block_group->key.objectid);
10905
10906                 if (ret) {
10907                         if (trimming)
10908                                 btrfs_put_block_group_trimming(block_group);
10909                         goto end_trans;
10910                 }
10911
10912                 /*
10913                  * If we're not mounted with -odiscard, we can just forget
10914                  * about this block group. Otherwise we'll need to wait
10915                  * until transaction commit to do the actual discard.
10916                  */
10917                 if (trimming) {
10918                         spin_lock(&fs_info->unused_bgs_lock);
10919                         /*
10920                          * A concurrent scrub might have added us to the list
10921                          * fs_info->unused_bgs, so use a list_move operation
10922                          * to add the block group to the deleted_bgs list.
10923                          */
10924                         list_move(&block_group->bg_list,
10925                                   &trans->transaction->deleted_bgs);
10926                         spin_unlock(&fs_info->unused_bgs_lock);
10927                         btrfs_get_block_group(block_group);
10928                 }
10929 end_trans:
10930                 btrfs_end_transaction(trans, root);
10931 next:
10932                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
10933                 btrfs_put_block_group(block_group);
10934                 spin_lock(&fs_info->unused_bgs_lock);
10935         }
10936         spin_unlock(&fs_info->unused_bgs_lock);
10937 }
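
/*
 * Usage sketch (an assumption for illustration, not part of the original
 * file): the unused_bgs list drained here is typically populated by
 * update_block_group() when a block group's used byte count drops to
 * zero; the cleaner kthread is then expected to call, roughly:
 *
 *      btrfs_delete_unused_bgs(root->fs_info);
 */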
10938
10939 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
10940 {
10941         struct btrfs_space_info *space_info;
10942         struct btrfs_super_block *disk_super;
10943         u64 features;
10944         u64 flags;
10945         int mixed = 0;
10946         int ret;
10947
10948         disk_super = fs_info->super_copy;
10949         if (!btrfs_super_root(disk_super))
10950                 return -EINVAL;
10951
10952         features = btrfs_super_incompat_flags(disk_super);
10953         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
10954                 mixed = 1;
10955
10956         flags = BTRFS_BLOCK_GROUP_SYSTEM;
10957         ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
10958         if (ret)
10959                 goto out;
10960
10961         if (mixed) {
10962                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
10963                 ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
10964         } else {
10965                 flags = BTRFS_BLOCK_GROUP_METADATA;
10966                 ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
10967                 if (ret)
10968                         goto out;
10969
10970                 flags = BTRFS_BLOCK_GROUP_DATA;
10971                 ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
10972         }
10973 out:
10974         return ret;
10975 }
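
/*
 * Summary (illustrative, not part of the original file) of the space
 * infos btrfs_init_space_info() creates:
 *
 *      regular fs:     SYSTEM, METADATA, DATA          (three infos)
 *      mixed groups:   SYSTEM, METADATA|DATA           (two infos)
 *
 * each initialized with zero total/used/readonly bytes via
 * update_space_info(fs_info, flags, 0, 0, 0, &space_info).
 */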
10976
10977 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
10978 {
10979         return unpin_extent_range(root, start, end, false);
10980 }
10981
10982 /*
10983  * It used to be that old block groups would be left around forever.
10984  * Iterating over them would be enough to trim unused space.  Since we
10985  * now automatically remove them, we also need to iterate over unallocated
10986  * space.
10987  *
10988  * We don't want a transaction for this since the discard may take a
10989  * substantial amount of time.  We don't require that a transaction be
10990  * running, but we do need to take a running transaction into account
10991  * to ensure that we're not discarding chunks that were released in
10992  * the current transaction.
10993  *
10994  * Holding the chunks lock will prevent other threads from allocating
10995  * or releasing chunks, but it won't prevent a running transaction
10996  * from committing and releasing the memory that the pending chunks
10997  * list head uses.  For that, we need to take a reference to the
10998  * transaction.
10999  */
11000 static int btrfs_trim_free_extents(struct btrfs_device *device,
11001                                    u64 minlen, u64 *trimmed)
11002 {
11003         u64 start = 0, len = 0;
11004         int ret;
11005
11006         *trimmed = 0;
11007
11008         /* Not writeable = nothing to do. */
11009         if (!device->writeable)
11010                 return 0;
11011
11012         /* No free space = nothing to do. */
11013         if (device->total_bytes <= device->bytes_used)
11014                 return 0;
11015
11016         ret = 0;
11017
11018         while (1) {
11019                 struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
11020                 struct btrfs_transaction *trans;
11021                 u64 bytes;
11022
11023                 ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
11024                 if (ret)
11025                         return ret;
11026
11027                 down_read(&fs_info->commit_root_sem);
11028
11029                 spin_lock(&fs_info->trans_lock);
11030                 trans = fs_info->running_transaction;
11031                 if (trans)
11032                         atomic_inc(&trans->use_count);
11033                 spin_unlock(&fs_info->trans_lock);
11034
11035                 ret = find_free_dev_extent_start(trans, device, minlen, start,
11036                                                  &start, &len);
11037                 if (trans)
11038                         btrfs_put_transaction(trans);
11039
11040                 if (ret) {
11041                         up_read(&fs_info->commit_root_sem);
11042                         mutex_unlock(&fs_info->chunk_mutex);
11043                         if (ret == -ENOSPC)
11044                                 ret = 0;
11045                         break;
11046                 }
11047
11048                 ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
11049                 up_read(&fs_info->commit_root_sem);
11050                 mutex_unlock(&fs_info->chunk_mutex);
11051
11052                 if (ret)
11053                         break;
11054
11055                 start += len;
11056                 *trimmed += bytes;
11057
11058                 if (fatal_signal_pending(current)) {
11059                         ret = -ERESTARTSYS;
11060                         break;
11061                 }
11062
11063                 cond_resched();
11064         }
11065
11066         return ret;
11067 }
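
/*
 * Shape of the loop above (an illustrative sketch, not part of the
 * original file): each pass finds the next unallocated hole on the
 * device at or after 'start', discards it, and advances past it:
 *
 *      find_free_dev_extent_start(trans, device, minlen, start,
 *                                 &start, &len);
 *      btrfs_issue_discard(device->bdev, start, len, &bytes);
 *      start += len;
 *      *trimmed += bytes;
 *
 * until find_free_dev_extent_start() returns -ENOSPC (no hole left),
 * which the loop converts to success.
 */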
11068
11069 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
11070 {
11071         struct btrfs_fs_info *fs_info = root->fs_info;
11072         struct btrfs_block_group_cache *cache = NULL;
11073         struct btrfs_device *device;
11074         struct list_head *devices;
11075         u64 group_trimmed;
11076         u64 start;
11077         u64 end;
11078         u64 trimmed = 0;
11079         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
11080         int ret = 0;
11081
11082         /*
11083          * Try to trim all FS space; our block group may start from non-zero.
11084          */
11085         if (range->len == total_bytes)
11086                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
11087         else
11088                 cache = btrfs_lookup_block_group(fs_info, range->start);
11089
11090         while (cache) {
11091                 if (cache->key.objectid >= (range->start + range->len)) {
11092                         btrfs_put_block_group(cache);
11093                         break;
11094                 }
11095
11096                 start = max(range->start, cache->key.objectid);
11097                 end = min(range->start + range->len,
11098                                 cache->key.objectid + cache->key.offset);
11099
11100                 if (end - start >= range->minlen) {
11101                         if (!block_group_cache_done(cache)) {
11102                                 ret = cache_block_group(cache, 0);
11103                                 if (ret) {
11104                                         btrfs_put_block_group(cache);
11105                                         break;
11106                                 }
11107                                 ret = wait_block_group_cache_done(cache);
11108                                 if (ret) {
11109                                         btrfs_put_block_group(cache);
11110                                         break;
11111                                 }
11112                         }
11113                         ret = btrfs_trim_block_group(cache,
11114                                                      &group_trimmed,
11115                                                      start,
11116                                                      end,
11117                                                      range->minlen);
11118
11119                         trimmed += group_trimmed;
11120                         if (ret) {
11121                                 btrfs_put_block_group(cache);
11122                                 break;
11123                         }
11124                 }
11125
11126                 cache = next_block_group(fs_info->tree_root, cache);
11127         }
11128
11129         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
11130         devices = &root->fs_info->fs_devices->alloc_list;
11131         list_for_each_entry(device, devices, dev_alloc_list) {
11132                 ret = btrfs_trim_free_extents(device, range->minlen,
11133                                               &group_trimmed);
11134                 if (ret)
11135                         break;
11136
11137                 trimmed += group_trimmed;
11138         }
11139         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
11140
11141         range->len = trimmed;
11142         return ret;
11143 }
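
/*
 * Usage sketch (illustrative, not part of the original file): this is
 * the backend of the FITRIM ioctl, which would drive it roughly as:
 *
 *      struct fstrim_range range = {
 *              .start  = 0,
 *              .len    = (u64)-1,        (whole filesystem)
 *              .minlen = 1024 * 1024,    (skip holes under 1MiB)
 *      };
 *      ret = btrfs_trim_fs(fs_info->tree_root, &range);
 *
 * and on success range.len reports the number of bytes trimmed.
 */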
11144
11145 /*
11146  * btrfs_{start,end}_write_no_snapshoting() are similar to
11147  * mnt_{want,drop}_write(): they are used to prevent some tasks from writing
11148  * data into the page cache through nocow before the subvolume is snapshotted
11149  * and only flushing it to disk after the snapshot is created, and to prevent
11150  * operations while snapshotting is ongoing that would make the snapshot
11151  * inconsistent (e.g. writes followed by expanding truncates).
11152  */
11153 void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
11154 {
11155         percpu_counter_dec(&root->subv_writers->counter);
11156         /*
11157          * Make sure counter is updated before we wake up waiters.
11158          */
11159         smp_mb();
11160         if (waitqueue_active(&root->subv_writers->wait))
11161                 wake_up(&root->subv_writers->wait);
11162 }
11163
11164 int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
11165 {
11166         if (atomic_read(&root->will_be_snapshoted))
11167                 return 0;
11168
11169         percpu_counter_inc(&root->subv_writers->counter);
11170         /*
11171          * Make sure counter is updated before we check for snapshot creation.
11172          */
11173         smp_mb();
11174         if (atomic_read(&root->will_be_snapshoted)) {
11175                 btrfs_end_write_no_snapshoting(root);
11176                 return 0;
11177         }
11178         return 1;
11179 }
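
/*
 * Usage sketch (illustrative, not part of the original file): the pair
 * above works like a conditional, non-blocking lock around nocow writes:
 *
 *      if (!btrfs_start_write_no_snapshoting(root))
 *              return 0;       (snapshot pending - fall back to cow)
 *      ... do the nocow write ...
 *      btrfs_end_write_no_snapshoting(root);
 */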
11180
11181 static int wait_snapshoting_atomic_t(atomic_t *a)
11182 {
11183         schedule();
11184         return 0;
11185 }
11186
11187 void btrfs_wait_for_snapshot_creation(struct btrfs_root *root)
11188 {
11189         while (true) {
11190                 int ret;
11191
11192                 ret = btrfs_start_write_no_snapshoting(root);
11193                 if (ret)
11194                         break;
11195                 wait_on_atomic_t(&root->will_be_snapshoted,
11196                                  wait_snapshoting_atomic_t,
11197                                  TASK_UNINTERRUPTIBLE);
11198         }
11199 }