/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "math.h"
#include "sysfs.h"
#include "qgroup.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * control flags for do_chunk_alloc's force field
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try to allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};
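
/*
 * An illustrative sketch, not a call site in this file ("trans" and
 * "extent_root" are assumed to be in scope): a caller that only wants a
 * new metadata chunk when space is genuinely short would pass
 * CHUNK_ALLOC_NO_FORCE:
 *
 *     ret = do_chunk_alloc(trans, extent_root,
 *                          btrfs_get_alloc_profile(extent_root, 0),
 *                          CHUNK_ALLOC_NO_FORCE);
 *
 * With CHUNK_ALLOC_FORCE the same call attempts the allocation regardless
 * of how full the existing chunks are.
 */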

static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root, u64 bytenr,
                              u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_delayed_ref_node *node, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
                                    u64 ram_bytes, u64 num_bytes, int delalloc);
static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
                                     u64 num_bytes, int delalloc);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
                     u64 bytenr, u64 num_bytes, int reserved);
static int __reserve_metadata_bytes(struct btrfs_root *root,
                                    struct btrfs_space_info *space_info,
                                    u64 orig_bytes,
                                    enum btrfs_reserve_flush_enum flush);
static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
                                     struct btrfs_space_info *space_info,
                                     u64 num_bytes);
static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
                                     struct btrfs_space_info *space_info,
                                     u64 num_bytes);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED ||
                cache->cached == BTRFS_CACHE_ERROR;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}
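
/*
 * Note: every btrfs_get_block_group() must be paired with a
 * btrfs_put_block_group(); the final put above is what frees the group
 * and its free_space_ctl.
 */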

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE);
}

static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                if (ret)
                        return ret;
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                if (ret)
                        return ret;

                while (nr--) {
                        u64 start, len;

                        if (logical[nr] > cache->key.objectid +
                            cache->key.offset)
                                continue;

                        if (logical[nr] + stripe_len <= cache->key.objectid)
                                continue;

                        start = logical[nr];
                        if (start < cache->key.objectid) {
                                start = cache->key.objectid;
                                len = (logical[nr] + stripe_len) - start;
                        } else {
                                len = min_t(u64, stripe_len,
                                            cache->key.objectid +
                                            cache->key.offset - start);
                        }

                        cache->bytes_super += len;
                        ret = add_excluded_extent(root, start, len);
                        if (ret) {
                                kfree(logical);
                                return ret;
                        }
                }

                kfree(logical);
        }
        return 0;
}
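
/*
 * Worked example for the clamping above (hypothetical layout): for a
 * block group covering [16M, 16M + 1G) and a super block stripe of
 * stripe_len = 64K at logical 16M - 32K, the stripe is not skipped
 * because it ends past the start of the group; start is clamped up to
 * 16M and len becomes (16M - 32K + 64K) - 16M = 32K, so only the 32K
 * that actually overlaps the group is counted in bytes_super and
 * excluded from the free space cache.
 */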

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_root *root,
                                struct btrfs_block_group_cache *block_group)
{
        u64 start = block_group->key.objectid;
        u64 len = block_group->key.offset;
        u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
                root->nodesize : root->sectorsize;
        u64 step = chunk << 1;

        while (len > chunk) {
                btrfs_remove_free_space(block_group, start, chunk);
                start += step;
                if (len < step)
                        len = 0;
                else
                        len -= step;
        }
}
#endif

/*
 * This is only called while caching a block group.  Since we could have
 * freed extents, we need to check the pinned_extents tree for any extents
 * that can't be used yet, because their free space will be released as
 * soon as the transaction commits.
 */
u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                       struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,
                                            NULL);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
        }

        return total_added;
}
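
/*
 * Worked example for the loop above (hypothetical numbers): caching the
 * range [0, 100) while a single pinned extent covers bytes 30-50 adds
 * [0, 30) as free space, restarts at 51, finds no further pinned
 * extents, and the final check adds [51, 100), returning
 * total_added = 30 + 49 = 79.
 */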

static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret;
        bool wakeup = true;

        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

#ifdef CONFIG_BTRFS_DEBUG
        /*
         * If we're fragmenting we don't want to make anybody think we can
         * allocate from this block group until we've had a chance to fragment
         * the free space.
         */
        if (btrfs_should_fragment_free_space(extent_root, block_group))
                wakeup = false;
#endif
        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = READA_FORWARD;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;

next:
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto out;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched() ||
                            rwsem_is_contended(&fs_info->commit_root_sem)) {
                                if (wakeup)
                                        caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->commit_root_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                mutex_lock(&caching_ctl->mutex);
                                down_read(&fs_info->commit_root_sem);
                                goto next;
                        }

                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret < 0)
                                goto out;
                        if (ret)
                                break;
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < last) {
                        key.objectid = last;
                        key.offset = 0;
                        key.type = BTRFS_EXTENT_ITEM_KEY;

                        if (wakeup)
                                caching_ctl->progress = last;
                        btrfs_release_path(path);
                        goto next;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY ||
                    key.type == BTRFS_METADATA_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        if (key.type == BTRFS_METADATA_ITEM_KEY)
                                last = key.objectid +
                                        fs_info->tree_root->nodesize;
                        else
                                last = key.objectid + key.offset;

                        if (total_found > CACHING_CTL_WAKE_UP) {
                                total_found = 0;
                                if (wakeup)
                                        wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

out:
        btrfs_free_path(path);
        return ret;
}

static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        int ret;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        mutex_lock(&caching_ctl->mutex);
        down_read(&fs_info->commit_root_sem);

        if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
                ret = load_free_space_tree(caching_ctl);
        else
                ret = load_extent_tree_free(caching_ctl);

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
        if (btrfs_should_fragment_free_space(extent_root, block_group)) {
                u64 bytes_used;

                spin_lock(&block_group->space_info->lock);
                spin_lock(&block_group->lock);
                bytes_used = block_group->key.offset -
                        btrfs_block_group_used(&block_group->item);
                block_group->space_info->bytes_used += bytes_used >> 1;
                spin_unlock(&block_group->lock);
                spin_unlock(&block_group->space_info->lock);
                fragment_free_space(extent_root, block_group);
        }
#endif

        caching_ctl->progress = (u64)-1;

        up_read(&fs_info->commit_root_sem);
        free_excluded_extents(fs_info->extent_root, block_group);
        mutex_unlock(&caching_ctl->mutex);

        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        if (!caching_ctl)
                return -ENOMEM;

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
                        caching_thread, NULL, NULL);

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but it can happen when one thread
         * starts to load the space cache info while some other thread starts
         * a transaction commit which tries to do an allocation while the
         * first thread is still loading the space cache info.  The previous
         * loop should have kept us from choosing this block group, but if
         * we've moved to the state where we will wait on caching block
         * groups we need to first check if we're doing a fast load here, so
         * we can wait for it to finish, otherwise we could end up allocating
         * from a block group whose cache gets evicted for one reason or
         * another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                mutex_lock(&caching_ctl->mutex);
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                        caching_ctl->progress = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                                cache->has_caching_ctl = 1;
                        }
                }
                spin_unlock(&cache->lock);
#ifdef CONFIG_BTRFS_DEBUG
                if (ret == 1 &&
                    btrfs_should_fragment_free_space(fs_info->extent_root,
                                                     cache)) {
                        u64 bytes_used;

                        spin_lock(&cache->space_info->lock);
                        spin_lock(&cache->lock);
                        bytes_used = cache->key.offset -
                                btrfs_block_group_used(&cache->item);
                        cache->space_info->bytes_used += bytes_used >> 1;
                        spin_unlock(&cache->lock);
                        spin_unlock(&cache->space_info->lock);
                        fragment_free_space(fs_info->extent_root, cache);
                }
#endif
                mutex_unlock(&caching_ctl->mutex);

                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We're either using the free space tree or no caching at all.
                 * Set cached to the appropriate value and wake up any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                        cache->has_caching_ctl = 1;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->commit_root_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->commit_root_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

        return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

/*
 * simple helper to search for an existing data extent at a given offset;
 * returns 0 if the exact extent item exists, > 0 if it does not and
 * < 0 on error
 */
int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}

/*
 * helper function to look up the reference count and flags of a tree block.
 *
 * the head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * what the reference count and extent flags will be without having to
 * run the delayed refs.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 offset, int metadata, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        /*
         * If we don't have skinny metadata, don't bother doing anything
         * different
         */
        if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
                offset = root->nodesize;
                metadata = 0;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }

search_again:
        key.objectid = bytenr;
        key.offset = offset;
        if (metadata)
                key.type = BTRFS_METADATA_ITEM_KEY;
        else
                key.type = BTRFS_EXTENT_ITEM_KEY;

        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
                if (path->slots[0]) {
                        path->slots[0]--;
                        btrfs_item_key_to_cpu(path->nodes[0], &key,
                                              path->slots[0]);
                        if (key.objectid == bytenr &&
                            key.type == BTRFS_EXTENT_ITEM_KEY &&
                            key.offset == root->nodesize)
                                ret = 0;
                }
        }

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and try
                         * again
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto search_again;
                }
                spin_lock(&head->lock);
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                spin_unlock(&head->lock);
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}
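
/*
 * Usage sketch (hypothetical caller; "eb" is an extent_buffer assumed to
 * be in scope): to read the committed reference count of a tree block
 * without joining a transaction, pass a NULL trans so the search runs
 * locklessly against the commit root:
 *
 *     u64 refs, flags;
 *
 *     ret = btrfs_lookup_extent_info(NULL, root, eb->start,
 *                                    btrfs_header_level(eb), 1,
 *                                    &refs, &flags);
 */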

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. Implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. Full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Actually the full back ref is generic, and
 * can be used in all cases where the implicit back ref is used. The
 * major shortcoming of the full back ref is its overhead. Every time a
 * tree block gets COWed, we have to update the back refs entry for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction,
 * the only way to drop a reference to it is to COW it. So we can detect
 * the event that a tree block loses its owner tree's reference and do
 * the back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent, and
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used
 * and the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * consist of only a key. The key offset for the implicit back refs is
 * the objectid of the block's owner tree. The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required. This information is stored in the
 * tree block info structure.
 */

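/*
 * Key composition examples (all values hypothetical): a data extent at
 * bytenr 136708096, referenced by inode 257 at file offset 0 in the
 * subvolume tree with objectid 5, carries the implicit back ref key
 *
 *     (136708096, BTRFS_EXTENT_DATA_REF_KEY,
 *      hash_extent_data_ref(5, 257, 0))
 *
 * while a tree block at bytenr 30408704 whose parent block lives at
 * bytenr 30425088 carries the full back ref key
 *
 *     (30408704, BTRFS_SHARED_BLOCK_REF_KEY, 30425088)
 */
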
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0); /* Corruption */
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret); /* Corruption */

        btrfs_extend_item(root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}
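
/*
 * The hash above only occupies the low 63 bits of the key offset, so
 * different (root, owner, offset) tuples can collide. Collisions are
 * handled rather than avoided: insert_extent_data_ref() below bumps
 * key.offset and retries until it finds either the matching ref item or
 * an unused offset.
 */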
1150
1151 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1152                                      struct btrfs_extent_data_ref *ref)
1153 {
1154         return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1155                                     btrfs_extent_data_ref_objectid(leaf, ref),
1156                                     btrfs_extent_data_ref_offset(leaf, ref));
1157 }
1158
1159 static int match_extent_data_ref(struct extent_buffer *leaf,
1160                                  struct btrfs_extent_data_ref *ref,
1161                                  u64 root_objectid, u64 owner, u64 offset)
1162 {
1163         if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1164             btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1165             btrfs_extent_data_ref_offset(leaf, ref) != offset)
1166                 return 0;
1167         return 1;
1168 }
1169
1170 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1171                                            struct btrfs_root *root,
1172                                            struct btrfs_path *path,
1173                                            u64 bytenr, u64 parent,
1174                                            u64 root_objectid,
1175                                            u64 owner, u64 offset)
1176 {
1177         struct btrfs_key key;
1178         struct btrfs_extent_data_ref *ref;
1179         struct extent_buffer *leaf;
1180         u32 nritems;
1181         int ret;
1182         int recow;
1183         int err = -ENOENT;
1184
1185         key.objectid = bytenr;
1186         if (parent) {
1187                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1188                 key.offset = parent;
1189         } else {
1190                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1191                 key.offset = hash_extent_data_ref(root_objectid,
1192                                                   owner, offset);
1193         }
1194 again:
1195         recow = 0;
1196         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1197         if (ret < 0) {
1198                 err = ret;
1199                 goto fail;
1200         }
1201
1202         if (parent) {
1203                 if (!ret)
1204                         return 0;
1205 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1206                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1207                 btrfs_release_path(path);
1208                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1209                 if (ret < 0) {
1210                         err = ret;
1211                         goto fail;
1212                 }
1213                 if (!ret)
1214                         return 0;
1215 #endif
1216                 goto fail;
1217         }
1218
1219         leaf = path->nodes[0];
1220         nritems = btrfs_header_nritems(leaf);
1221         while (1) {
1222                 if (path->slots[0] >= nritems) {
1223                         ret = btrfs_next_leaf(root, path);
1224                         if (ret < 0)
1225                                 err = ret;
1226                         if (ret)
1227                                 goto fail;
1228
1229                         leaf = path->nodes[0];
1230                         nritems = btrfs_header_nritems(leaf);
1231                         recow = 1;
1232                 }
1233
1234                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1235                 if (key.objectid != bytenr ||
1236                     key.type != BTRFS_EXTENT_DATA_REF_KEY)
1237                         goto fail;
1238
1239                 ref = btrfs_item_ptr(leaf, path->slots[0],
1240                                      struct btrfs_extent_data_ref);
1241
1242                 if (match_extent_data_ref(leaf, ref, root_objectid,
1243                                           owner, offset)) {
1244                         if (recow) {
1245                                 btrfs_release_path(path);
1246                                 goto again;
1247                         }
1248                         err = 0;
1249                         break;
1250                 }
1251                 path->slots[0]++;
1252         }
1253 fail:
1254         return err;
1255 }
1256
1257 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1258                                            struct btrfs_root *root,
1259                                            struct btrfs_path *path,
1260                                            u64 bytenr, u64 parent,
1261                                            u64 root_objectid, u64 owner,
1262                                            u64 offset, int refs_to_add)
1263 {
1264         struct btrfs_key key;
1265         struct extent_buffer *leaf;
1266         u32 size;
1267         u32 num_refs;
1268         int ret;
1269
1270         key.objectid = bytenr;
1271         if (parent) {
1272                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1273                 key.offset = parent;
1274                 size = sizeof(struct btrfs_shared_data_ref);
1275         } else {
1276                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1277                 key.offset = hash_extent_data_ref(root_objectid,
1278                                                   owner, offset);
1279                 size = sizeof(struct btrfs_extent_data_ref);
1280         }
1281
1282         ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1283         if (ret && ret != -EEXIST)
1284                 goto fail;
1285
1286         leaf = path->nodes[0];
1287         if (parent) {
1288                 struct btrfs_shared_data_ref *ref;
1289                 ref = btrfs_item_ptr(leaf, path->slots[0],
1290                                      struct btrfs_shared_data_ref);
1291                 if (ret == 0) {
1292                         btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1293                 } else {
1294                         num_refs = btrfs_shared_data_ref_count(leaf, ref);
1295                         num_refs += refs_to_add;
1296                         btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1297                 }
1298         } else {
1299                 struct btrfs_extent_data_ref *ref;
1300                 while (ret == -EEXIST) {
1301                         ref = btrfs_item_ptr(leaf, path->slots[0],
1302                                              struct btrfs_extent_data_ref);
1303                         if (match_extent_data_ref(leaf, ref, root_objectid,
1304                                                   owner, offset))
1305                                 break;
1306                         btrfs_release_path(path);
1307                         key.offset++;
1308                         ret = btrfs_insert_empty_item(trans, root, path, &key,
1309                                                       size);
1310                         if (ret && ret != -EEXIST)
1311                                 goto fail;
1312
1313                         leaf = path->nodes[0];
1314                 }
1315                 ref = btrfs_item_ptr(leaf, path->slots[0],
1316                                      struct btrfs_extent_data_ref);
1317                 if (ret == 0) {
1318                         btrfs_set_extent_data_ref_root(leaf, ref,
1319                                                        root_objectid);
1320                         btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1321                         btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1322                         btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1323                 } else {
1324                         num_refs = btrfs_extent_data_ref_count(leaf, ref);
1325                         num_refs += refs_to_add;
1326                         btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1327                 }
1328         }
1329         btrfs_mark_buffer_dirty(leaf);
1330         ret = 0;
1331 fail:
1332         btrfs_release_path(path);
1333         return ret;
1334 }
1335
1336 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1337                                            struct btrfs_root *root,
1338                                            struct btrfs_path *path,
1339                                            int refs_to_drop, int *last_ref)
1340 {
1341         struct btrfs_key key;
1342         struct btrfs_extent_data_ref *ref1 = NULL;
1343         struct btrfs_shared_data_ref *ref2 = NULL;
1344         struct extent_buffer *leaf;
1345         u32 num_refs = 0;
1346         int ret = 0;
1347
1348         leaf = path->nodes[0];
1349         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1350
1351         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1352                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1353                                       struct btrfs_extent_data_ref);
1354                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1355         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1356                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1357                                       struct btrfs_shared_data_ref);
1358                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1359 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1360         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1361                 struct btrfs_extent_ref_v0 *ref0;
1362                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1363                                       struct btrfs_extent_ref_v0);
1364                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1365 #endif
1366         } else {
1367                 BUG();
1368         }
1369
1370         BUG_ON(num_refs < refs_to_drop);
1371         num_refs -= refs_to_drop;
1372
1373         if (num_refs == 0) {
1374                 ret = btrfs_del_item(trans, root, path);
1375                 *last_ref = 1;
1376         } else {
1377                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1378                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1379                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1380                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1381 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1382                 else {
1383                         struct btrfs_extent_ref_v0 *ref0;
1384                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1385                                         struct btrfs_extent_ref_v0);
1386                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1387                 }
1388 #endif
1389                 btrfs_mark_buffer_dirty(leaf);
1390         }
1391         return ret;
1392 }
1393
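/*
 * Return the reference count of the data backref that @path points to,
 * or of the inline backref @iref when it is given.  Handles extent data
 * refs, shared data refs and, when compiled in, v0 compat refs.
 */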
1394 static noinline u32 extent_data_ref_count(struct btrfs_path *path,
1395                                           struct btrfs_extent_inline_ref *iref)
1396 {
1397         struct btrfs_key key;
1398         struct extent_buffer *leaf;
1399         struct btrfs_extent_data_ref *ref1;
1400         struct btrfs_shared_data_ref *ref2;
1401         u32 num_refs = 0;
1402
1403         leaf = path->nodes[0];
1404         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1405         if (iref) {
1406                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1407                     BTRFS_EXTENT_DATA_REF_KEY) {
1408                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1409                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1410                 } else {
1411                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1412                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1413                 }
1414         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1415                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1416                                       struct btrfs_extent_data_ref);
1417                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1418         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1419                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1420                                       struct btrfs_shared_data_ref);
1421                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1422 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1423         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1424                 struct btrfs_extent_ref_v0 *ref0;
1425                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1426                                       struct btrfs_extent_ref_v0);
1427                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1428 #endif
1429         } else {
1430                 WARN_ON(1);
1431         }
1432         return num_refs;
1433 }
1434
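/*
 * Look up a keyed tree block backref: a shared block ref keyed on
 * @parent when the block is shared, otherwise a tree block ref keyed on
 * @root_objectid.  Returns 0 if the ref exists and -ENOENT if not.
 */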
1435 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1436                                           struct btrfs_root *root,
1437                                           struct btrfs_path *path,
1438                                           u64 bytenr, u64 parent,
1439                                           u64 root_objectid)
1440 {
1441         struct btrfs_key key;
1442         int ret;
1443
1444         key.objectid = bytenr;
1445         if (parent) {
1446                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1447                 key.offset = parent;
1448         } else {
1449                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1450                 key.offset = root_objectid;
1451         }
1452
1453         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1454         if (ret > 0)
1455                 ret = -ENOENT;
1456 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1457         if (ret == -ENOENT && parent) {
1458                 btrfs_release_path(path);
1459                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1460                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1461                 if (ret > 0)
1462                         ret = -ENOENT;
1463         }
1464 #endif
1465         return ret;
1466 }
1467
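/*
 * Insert a keyed tree block backref.  The item body is empty (size 0);
 * all of the information lives in the key itself.
 */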
1468 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1469                                           struct btrfs_root *root,
1470                                           struct btrfs_path *path,
1471                                           u64 bytenr, u64 parent,
1472                                           u64 root_objectid)
1473 {
1474         struct btrfs_key key;
1475         int ret;
1476
1477         key.objectid = bytenr;
1478         if (parent) {
1479                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1480                 key.offset = parent;
1481         } else {
1482                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1483                 key.offset = root_objectid;
1484         }
1485
1486         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1487         btrfs_release_path(path);
1488         return ret;
1489 }
1490
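/*
 * Pick the backref key type for an extent: tree block refs for metadata
 * owners (owner < BTRFS_FIRST_FREE_OBJECTID) and data refs otherwise,
 * using the shared variant of each whenever a parent block is given.
 */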
1491 static inline int extent_ref_type(u64 parent, u64 owner)
1492 {
1493         int type;
1494         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1495                 if (parent > 0)
1496                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1497                 else
1498                         type = BTRFS_TREE_BLOCK_REF_KEY;
1499         } else {
1500                 if (parent > 0)
1501                         type = BTRFS_SHARED_DATA_REF_KEY;
1502                 else
1503                         type = BTRFS_EXTENT_DATA_REF_KEY;
1504         }
1505         return type;
1506 }
1507
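/*
 * Starting at @level, walk up the path looking for the first key that
 * follows the current slot.  Returns 0 and stores that key in @key, or
 * 1 if no next key could be found.
 */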
1508 static int find_next_key(struct btrfs_path *path, int level,
1509                          struct btrfs_key *key)
1511 {
1512         for (; level < BTRFS_MAX_LEVEL; level++) {
1513                 if (!path->nodes[level])
1514                         break;
1515                 if (path->slots[level] + 1 >=
1516                     btrfs_header_nritems(path->nodes[level]))
1517                         continue;
1518                 if (level == 0)
1519                         btrfs_item_key_to_cpu(path->nodes[level], key,
1520                                               path->slots[level] + 1);
1521                 else
1522                         btrfs_node_key_to_cpu(path->nodes[level], key,
1523                                               path->slots[level] + 1);
1524                 return 0;
1525         }
1526         return 1;
1527 }
1528
1529 /*
1530  * Look for an inline back ref.  If the back ref is found, *ref_ret is
1531  * set to the address of the inline back ref, and 0 is returned.
1532  *
1533  * If the back ref isn't found, *ref_ret is set to the address where it
1534  * should be inserted, and -ENOENT is returned.
1535  *
1536  * If insert is true and there are too many inline back refs, the path
1537  * points to the extent item, and -EAGAIN is returned.
1538  *
1539  * NOTE: inline back refs are ordered in the same way that back ref
1540  *       items in the tree are ordered.
1541  */
1542 static noinline_for_stack
1543 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1544                                  struct btrfs_root *root,
1545                                  struct btrfs_path *path,
1546                                  struct btrfs_extent_inline_ref **ref_ret,
1547                                  u64 bytenr, u64 num_bytes,
1548                                  u64 parent, u64 root_objectid,
1549                                  u64 owner, u64 offset, int insert)
1550 {
1551         struct btrfs_key key;
1552         struct extent_buffer *leaf;
1553         struct btrfs_extent_item *ei;
1554         struct btrfs_extent_inline_ref *iref;
1555         u64 flags;
1556         u64 item_size;
1557         unsigned long ptr;
1558         unsigned long end;
1559         int extra_size;
1560         int type;
1561         int want;
1562         int ret;
1563         int err = 0;
1564         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1565                                                  SKINNY_METADATA);
1566
1567         key.objectid = bytenr;
1568         key.type = BTRFS_EXTENT_ITEM_KEY;
1569         key.offset = num_bytes;
1570
1571         want = extent_ref_type(parent, owner);
1572         if (insert) {
1573                 extra_size = btrfs_extent_inline_ref_size(want);
1574                 path->keep_locks = 1;
1575         } else
1576                 extra_size = -1;
1577
1578         /*
1579          * For metadata, owner is the level of the block, so use it
1580          * directly as the offset of the skinny metadata key.
1581          */
1582         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1583                 key.type = BTRFS_METADATA_ITEM_KEY;
1584                 key.offset = owner;
1585         }
1586
1587 again:
1588         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1589         if (ret < 0) {
1590                 err = ret;
1591                 goto out;
1592         }
1593
1594         /*
1595          * We may be a newly converted file system which still has the old fat
1596          * extent entries for metadata, so try and see if we have one of those.
1597          */
1598         if (ret > 0 && skinny_metadata) {
1599                 skinny_metadata = false;
1600                 if (path->slots[0]) {
1601                         path->slots[0]--;
1602                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1603                                               path->slots[0]);
1604                         if (key.objectid == bytenr &&
1605                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1606                             key.offset == num_bytes)
1607                                 ret = 0;
1608                 }
1609                 if (ret) {
1610                         key.objectid = bytenr;
1611                         key.type = BTRFS_EXTENT_ITEM_KEY;
1612                         key.offset = num_bytes;
1613                         btrfs_release_path(path);
1614                         goto again;
1615                 }
1616         }
1617
1618         if (ret && !insert) {
1619                 err = -ENOENT;
1620                 goto out;
1621         } else if (WARN_ON(ret)) {
1622                 err = -EIO;
1623                 goto out;
1624         }
1625
1626         leaf = path->nodes[0];
1627         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1628 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1629         if (item_size < sizeof(*ei)) {
1630                 if (!insert) {
1631                         err = -ENOENT;
1632                         goto out;
1633                 }
1634                 ret = convert_extent_item_v0(trans, root, path, owner,
1635                                              extra_size);
1636                 if (ret < 0) {
1637                         err = ret;
1638                         goto out;
1639                 }
1640                 leaf = path->nodes[0];
1641                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1642         }
1643 #endif
1644         BUG_ON(item_size < sizeof(*ei));
1645
1646         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1647         flags = btrfs_extent_flags(leaf, ei);
1648
1649         ptr = (unsigned long)(ei + 1);
1650         end = (unsigned long)ei + item_size;
1651
1652         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1653                 ptr += sizeof(struct btrfs_tree_block_info);
1654                 BUG_ON(ptr > end);
1655         }
1656
1657         err = -ENOENT;
1658         while (1) {
1659                 if (ptr >= end) {
1660                         WARN_ON(ptr > end);
1661                         break;
1662                 }
1663                 iref = (struct btrfs_extent_inline_ref *)ptr;
1664                 type = btrfs_extent_inline_ref_type(leaf, iref);
1665                 if (want < type)
1666                         break;
1667                 if (want > type) {
1668                         ptr += btrfs_extent_inline_ref_size(type);
1669                         continue;
1670                 }
1671
1672                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1673                         struct btrfs_extent_data_ref *dref;
1674                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1675                         if (match_extent_data_ref(leaf, dref, root_objectid,
1676                                                   owner, offset)) {
1677                                 err = 0;
1678                                 break;
1679                         }
1680                         if (hash_extent_data_ref_item(leaf, dref) <
1681                             hash_extent_data_ref(root_objectid, owner, offset))
1682                                 break;
1683                 } else {
1684                         u64 ref_offset;
1685                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1686                         if (parent > 0) {
1687                                 if (parent == ref_offset) {
1688                                         err = 0;
1689                                         break;
1690                                 }
1691                                 if (ref_offset < parent)
1692                                         break;
1693                         } else {
1694                                 if (root_objectid == ref_offset) {
1695                                         err = 0;
1696                                         break;
1697                                 }
1698                                 if (ref_offset < root_objectid)
1699                                         break;
1700                         }
1701                 }
1702                 ptr += btrfs_extent_inline_ref_size(type);
1703         }
1704         if (err == -ENOENT && insert) {
1705                 if (item_size + extra_size >=
1706                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1707                         err = -EAGAIN;
1708                         goto out;
1709                 }
1710                 /*
1711                  * To add a new inline back ref, we have to make sure
1712                  * there is no corresponding back ref item.
1713                  * For simplicity, we just do not add a new inline back
1714                  * ref if there is any kind of item for this block.
1715                  */
1716                 if (find_next_key(path, 0, &key) == 0 &&
1717                     key.objectid == bytenr &&
1718                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1719                         err = -EAGAIN;
1720                         goto out;
1721                 }
1722         }
1723         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1724 out:
1725         if (insert) {
1726                 path->keep_locks = 0;
1727                 btrfs_unlock_up_safe(path, 1);
1728         }
1729         return err;
1730 }
1731
1732 /*
1733  * helper to add a new inline back ref
1734  */
1735 static noinline_for_stack
1736 void setup_inline_extent_backref(struct btrfs_root *root,
1737                                  struct btrfs_path *path,
1738                                  struct btrfs_extent_inline_ref *iref,
1739                                  u64 parent, u64 root_objectid,
1740                                  u64 owner, u64 offset, int refs_to_add,
1741                                  struct btrfs_delayed_extent_op *extent_op)
1742 {
1743         struct extent_buffer *leaf;
1744         struct btrfs_extent_item *ei;
1745         unsigned long ptr;
1746         unsigned long end;
1747         unsigned long item_offset;
1748         u64 refs;
1749         int size;
1750         int type;
1751
1752         leaf = path->nodes[0];
1753         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1754         item_offset = (unsigned long)iref - (unsigned long)ei;
1755
1756         type = extent_ref_type(parent, owner);
1757         size = btrfs_extent_inline_ref_size(type);
1758
1759         btrfs_extend_item(root, path, size);
1760
1761         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1762         refs = btrfs_extent_refs(leaf, ei);
1763         refs += refs_to_add;
1764         btrfs_set_extent_refs(leaf, ei, refs);
1765         if (extent_op)
1766                 __run_delayed_extent_op(extent_op, leaf, ei);
1767
1768         ptr = (unsigned long)ei + item_offset;
1769         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1770         if (ptr < end - size)
1771                 memmove_extent_buffer(leaf, ptr + size, ptr,
1772                                       end - size - ptr);
1773
1774         iref = (struct btrfs_extent_inline_ref *)ptr;
1775         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1776         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1777                 struct btrfs_extent_data_ref *dref;
1778                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1779                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1780                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1781                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1782                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1783         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1784                 struct btrfs_shared_data_ref *sref;
1785                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1786                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1787                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1788         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1789                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1790         } else {
1791                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1792         }
1793         btrfs_mark_buffer_dirty(leaf);
1794 }
1795
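/*
 * Look up a backref for the given extent, trying the inline backref in
 * the extent item first and falling back to the keyed backref items
 * (tree block or data refs) when no inline ref is found.
 */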
1796 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1797                                  struct btrfs_root *root,
1798                                  struct btrfs_path *path,
1799                                  struct btrfs_extent_inline_ref **ref_ret,
1800                                  u64 bytenr, u64 num_bytes, u64 parent,
1801                                  u64 root_objectid, u64 owner, u64 offset)
1802 {
1803         int ret;
1804
1805         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1806                                            bytenr, num_bytes, parent,
1807                                            root_objectid, owner, offset, 0);
1808         if (ret != -ENOENT)
1809                 return ret;
1810
1811         btrfs_release_path(path);
1812         *ref_ret = NULL;
1813
1814         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1815                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1816                                             root_objectid);
1817         } else {
1818                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1819                                              root_objectid, owner, offset);
1820         }
1821         return ret;
1822 }
1823
1824 /*
1825  * helper to update/remove inline back ref
1826  */
1827 static noinline_for_stack
1828 void update_inline_extent_backref(struct btrfs_root *root,
1829                                   struct btrfs_path *path,
1830                                   struct btrfs_extent_inline_ref *iref,
1831                                   int refs_to_mod,
1832                                   struct btrfs_delayed_extent_op *extent_op,
1833                                   int *last_ref)
1834 {
1835         struct extent_buffer *leaf;
1836         struct btrfs_extent_item *ei;
1837         struct btrfs_extent_data_ref *dref = NULL;
1838         struct btrfs_shared_data_ref *sref = NULL;
1839         unsigned long ptr;
1840         unsigned long end;
1841         u32 item_size;
1842         int size;
1843         int type;
1844         u64 refs;
1845
1846         leaf = path->nodes[0];
1847         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1848         refs = btrfs_extent_refs(leaf, ei);
1849         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1850         refs += refs_to_mod;
1851         btrfs_set_extent_refs(leaf, ei, refs);
1852         if (extent_op)
1853                 __run_delayed_extent_op(extent_op, leaf, ei);
1854
1855         type = btrfs_extent_inline_ref_type(leaf, iref);
1856
1857         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1858                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1859                 refs = btrfs_extent_data_ref_count(leaf, dref);
1860         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1861                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1862                 refs = btrfs_shared_data_ref_count(leaf, sref);
1863         } else {
1864                 refs = 1;
1865                 BUG_ON(refs_to_mod != -1);
1866         }
1867
1868         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1869         refs += refs_to_mod;
1870
1871         if (refs > 0) {
1872                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1873                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1874                 else
1875                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1876         } else {
1877                 *last_ref = 1;
1878                 size = btrfs_extent_inline_ref_size(type);
1879                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1880                 ptr = (unsigned long)iref;
1881                 end = (unsigned long)ei + item_size;
1882                 if (ptr + size < end)
1883                         memmove_extent_buffer(leaf, ptr, ptr + size,
1884                                               end - ptr - size);
1885                 item_size -= size;
1886                 btrfs_truncate_item(root, path, item_size, 1);
1887         }
1888         btrfs_mark_buffer_dirty(leaf);
1889 }
1890
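/*
 * Insert an inline backref into the extent item, or bump the count of a
 * matching one that already exists.  -EAGAIN from the lookup (no room
 * left in the item) is passed through so the caller can fall back to a
 * keyed backref.
 */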
1891 static noinline_for_stack
1892 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1893                                  struct btrfs_root *root,
1894                                  struct btrfs_path *path,
1895                                  u64 bytenr, u64 num_bytes, u64 parent,
1896                                  u64 root_objectid, u64 owner,
1897                                  u64 offset, int refs_to_add,
1898                                  struct btrfs_delayed_extent_op *extent_op)
1899 {
1900         struct btrfs_extent_inline_ref *iref;
1901         int ret;
1902
1903         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1904                                            bytenr, num_bytes, parent,
1905                                            root_objectid, owner, offset, 1);
1906         if (ret == 0) {
1907                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1908                 update_inline_extent_backref(root, path, iref,
1909                                              refs_to_add, extent_op, NULL);
1910         } else if (ret == -ENOENT) {
1911                 setup_inline_extent_backref(root, path, iref, parent,
1912                                             root_objectid, owner, offset,
1913                                             refs_to_add, extent_op);
1914                 ret = 0;
1915         }
1916         return ret;
1917 }
1918
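/*
 * Insert a keyed backref item, dispatching on the owner: tree block refs
 * for metadata owners, extent data refs for data extents.
 */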
1919 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1920                                  struct btrfs_root *root,
1921                                  struct btrfs_path *path,
1922                                  u64 bytenr, u64 parent, u64 root_objectid,
1923                                  u64 owner, u64 offset, int refs_to_add)
1924 {
1925         int ret;
1926         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1927                 BUG_ON(refs_to_add != 1);
1928                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1929                                             parent, root_objectid);
1930         } else {
1931                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1932                                              parent, root_objectid,
1933                                              owner, offset, refs_to_add);
1934         }
1935         return ret;
1936 }
1937
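/*
 * Drop @refs_to_drop references, either from an inline backref when
 * @iref is set, from a keyed data ref item, or by deleting a keyed tree
 * block ref item outright.  *last_ref is set once the last reference for
 * this backref is gone.
 */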
1938 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1939                                  struct btrfs_root *root,
1940                                  struct btrfs_path *path,
1941                                  struct btrfs_extent_inline_ref *iref,
1942                                  int refs_to_drop, int is_data, int *last_ref)
1943 {
1944         int ret = 0;
1945
1946         BUG_ON(!is_data && refs_to_drop != 1);
1947         if (iref) {
1948                 update_inline_extent_backref(root, path, iref,
1949                                              -refs_to_drop, NULL, last_ref);
1950         } else if (is_data) {
1951                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
1952                                              last_ref);
1953         } else {
1954                 *last_ref = 1;
1955                 ret = btrfs_del_item(trans, root, path);
1956         }
1957         return ret;
1958 }
1959
1960 #define in_range(b, first, len)        ((b) >= (first) && (b) < (first) + (len))
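/*
 * Issue a discard for [start, start + len), skipping any ranges that
 * overlap a superblock mirror on this device.  Offsets and lengths are
 * trimmed to 512-byte sector boundaries, and the number of bytes that
 * were actually discarded is returned in *discarded_bytes.
 */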
1961 static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
1962                                u64 *discarded_bytes)
1963 {
1964         int j, ret = 0;
1965         u64 bytes_left, end;
1966         u64 aligned_start = ALIGN(start, 1 << 9);
1967
1968         if (WARN_ON(start != aligned_start)) {
1969                 len -= aligned_start - start;
1970                 len = round_down(len, 1 << 9);
1971                 start = aligned_start;
1972         }
1973
1974         *discarded_bytes = 0;
1975
1976         if (!len)
1977                 return 0;
1978
1979         end = start + len;
1980         bytes_left = len;
1981
1982         /* Skip any superblocks on this device. */
1983         for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
1984                 u64 sb_start = btrfs_sb_offset(j);
1985                 u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
1986                 u64 size = sb_start - start;
1987
1988                 if (!in_range(sb_start, start, bytes_left) &&
1989                     !in_range(sb_end, start, bytes_left) &&
1990                     !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
1991                         continue;
1992
1993                 /*
1994                  * Superblock spans beginning of range.  Adjust start and
1995                  * try again.
1996                  */
1997                 if (sb_start <= start) {
1998                         start += sb_end - start;
1999                         if (start > end) {
2000                                 bytes_left = 0;
2001                                 break;
2002                         }
2003                         bytes_left = end - start;
2004                         continue;
2005                 }
2006
2007                 if (size) {
2008                         ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
2009                                                    GFP_NOFS, 0);
2010                         if (!ret)
2011                                 *discarded_bytes += size;
2012                         else if (ret != -EOPNOTSUPP)
2013                                 return ret;
2014                 }
2015
2016                 start = sb_end;
2017                 if (start > end) {
2018                         bytes_left = 0;
2019                         break;
2020                 }
2021                 bytes_left = end - start;
2022         }
2023
2024         if (bytes_left) {
2025                 ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
2026                                            GFP_NOFS, 0);
2027                 if (!ret)
2028                         *discarded_bytes += bytes_left;
2029         }
2030         return ret;
2031 }
2032
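/*
 * Map the logical range [bytenr, bytenr + num_bytes) to its physical
 * stripes and issue a discard on each stripe whose device supports it.
 * -EOPNOTSUPP is ignored, and *actual_bytes, when supplied, reports how
 * much was really discarded.
 */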
2033 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
2034                          u64 num_bytes, u64 *actual_bytes)
2035 {
2036         int ret;
2037         u64 discarded_bytes = 0;
2038         struct btrfs_bio *bbio = NULL;
2039
2041         /*
2042          * Avoid races with device replace and make sure our bbio has devices
2043          * associated to its stripes that don't go away while we are discarding.
2044          */
2045         btrfs_bio_counter_inc_blocked(root->fs_info);
2046         /* Tell the block device(s) that the sectors can be discarded */
2047         ret = btrfs_map_block(root->fs_info, REQ_OP_DISCARD,
2048                               bytenr, &num_bytes, &bbio, 0);
2049         /* Error condition is -ENOMEM */
2050         if (!ret) {
2051                 struct btrfs_bio_stripe *stripe = bbio->stripes;
2052                 int i;
2053
2055                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
2056                         u64 bytes;
2057                         if (!stripe->dev->can_discard)
2058                                 continue;
2059
2060                         ret = btrfs_issue_discard(stripe->dev->bdev,
2061                                                   stripe->physical,
2062                                                   stripe->length,
2063                                                   &bytes);
2064                         if (!ret)
2065                                 discarded_bytes += bytes;
2066                         else if (ret != -EOPNOTSUPP)
2067                                 /*
                                      * Logic errors or -ENOMEM, or -EIO,
                                      * though it is not clear how -EIO
                                      * could happen here.
                                      */
                                     break;
2068
2069                         /*
2070                          * Just in case we get back EOPNOTSUPP for some reason,
2071                          * ignore the return value so we don't screw up
2072                          * people calling discard_extent.
2073                          */
2074                         ret = 0;
2075                 }
2076                 btrfs_put_bbio(bbio);
2077         }
2078         btrfs_bio_counter_dec(root->fs_info);
2079
2080         if (actual_bytes)
2081                 *actual_bytes = discarded_bytes;
2082
2084         if (ret == -EOPNOTSUPP)
2085                 ret = 0;
2086         return ret;
2087 }
2088
2089 /* Can return -ENOMEM */
2090 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2091                          struct btrfs_root *root,
2092                          u64 bytenr, u64 num_bytes, u64 parent,
2093                          u64 root_objectid, u64 owner, u64 offset)
2094 {
2095         int ret;
2096         struct btrfs_fs_info *fs_info = root->fs_info;
2097
2098         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
2099                root_objectid == BTRFS_TREE_LOG_OBJECTID);
2100
2101         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
2102                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
2103                                         num_bytes,
2104                                         parent, root_objectid, (int)owner,
2105                                         BTRFS_ADD_DELAYED_REF, NULL);
2106         } else {
2107                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
2108                                         num_bytes, parent, root_objectid,
2109                                         owner, offset, 0,
2110                                         BTRFS_ADD_DELAYED_REF, NULL);
2111         }
2112         return ret;
2113 }
2114
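/*
 * Add @refs_to_add references to an existing extent.  First try to
 * insert (or update) an inline backref; if that fails with -EAGAIN
 * because the extent item is full, bump the ref count on the extent
 * item and insert a normal keyed backref instead.
 */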
2115 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2116                                   struct btrfs_root *root,
2117                                   struct btrfs_delayed_ref_node *node,
2118                                   u64 parent, u64 root_objectid,
2119                                   u64 owner, u64 offset, int refs_to_add,
2120                                   struct btrfs_delayed_extent_op *extent_op)
2121 {
2122         struct btrfs_fs_info *fs_info = root->fs_info;
2123         struct btrfs_path *path;
2124         struct extent_buffer *leaf;
2125         struct btrfs_extent_item *item;
2126         struct btrfs_key key;
2127         u64 bytenr = node->bytenr;
2128         u64 num_bytes = node->num_bytes;
2129         u64 refs;
2130         int ret;
2131
2132         path = btrfs_alloc_path();
2133         if (!path)
2134                 return -ENOMEM;
2135
2136         path->reada = READA_FORWARD;
2137         path->leave_spinning = 1;
2138         /* This will set up the path even if it fails to insert the back ref */
2139         ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
2140                                            bytenr, num_bytes, parent,
2141                                            root_objectid, owner, offset,
2142                                            refs_to_add, extent_op);
2143         if ((ret < 0 && ret != -EAGAIN) || !ret)
2144                 goto out;
2145
2146         /*
2147          * Ok we had -EAGAIN which means we didn't have space to insert an
2148          * inline extent ref, so just update the reference count and add a
2149          * normal backref.
2150          */
2151         leaf = path->nodes[0];
2152         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2153         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2154         refs = btrfs_extent_refs(leaf, item);
2155         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2156         if (extent_op)
2157                 __run_delayed_extent_op(extent_op, leaf, item);
2158
2159         btrfs_mark_buffer_dirty(leaf);
2160         btrfs_release_path(path);
2161
2162         path->reada = READA_FORWARD;
2163         path->leave_spinning = 1;
2164         /* now insert the actual backref */
2165         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2166                                     path, bytenr, parent, root_objectid,
2167                                     owner, offset, refs_to_add);
2168         if (ret)
2169                 btrfs_abort_transaction(trans, ret);
2170 out:
2171         btrfs_free_path(path);
2172         return ret;
2173 }
2174
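/*
 * Apply one delayed data ref: insert the reserved file extent for a
 * newly allocated extent, or add/drop a reference on an existing one,
 * depending on the node's action.
 */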
2175 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2176                                 struct btrfs_root *root,
2177                                 struct btrfs_delayed_ref_node *node,
2178                                 struct btrfs_delayed_extent_op *extent_op,
2179                                 int insert_reserved)
2180 {
2181         int ret = 0;
2182         struct btrfs_delayed_data_ref *ref;
2183         struct btrfs_key ins;
2184         u64 parent = 0;
2185         u64 ref_root = 0;
2186         u64 flags = 0;
2187
2188         ins.objectid = node->bytenr;
2189         ins.offset = node->num_bytes;
2190         ins.type = BTRFS_EXTENT_ITEM_KEY;
2191
2192         ref = btrfs_delayed_node_to_data_ref(node);
2193         trace_run_delayed_data_ref(root->fs_info, node, ref, node->action);
2194
2195         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2196                 parent = ref->parent;
2197         ref_root = ref->root;
2198
2199         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2200                 if (extent_op)
2201                         flags |= extent_op->flags_to_set;
2202                 ret = alloc_reserved_file_extent(trans, root,
2203                                                  parent, ref_root, flags,
2204                                                  ref->objectid, ref->offset,
2205                                                  &ins, node->ref_mod);
2206         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2207                 ret = __btrfs_inc_extent_ref(trans, root, node, parent,
2208                                              ref_root, ref->objectid,
2209                                              ref->offset, node->ref_mod,
2210                                              extent_op);
2211         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2212                 ret = __btrfs_free_extent(trans, root, node, parent,
2213                                           ref_root, ref->objectid,
2214                                           ref->offset, node->ref_mod,
2215                                           extent_op);
2216         } else {
2217                 BUG();
2218         }
2219         return ret;
2220 }
2221
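/*
 * Apply a delayed extent op to the extent item in place: set the
 * requested flags and, for tree blocks, update the key stored in the
 * btrfs_tree_block_info that follows the item.
 */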
2222 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2223                                     struct extent_buffer *leaf,
2224                                     struct btrfs_extent_item *ei)
2225 {
2226         u64 flags = btrfs_extent_flags(leaf, ei);
2227         if (extent_op->update_flags) {
2228                 flags |= extent_op->flags_to_set;
2229                 btrfs_set_extent_flags(leaf, ei, flags);
2230         }
2231
2232         if (extent_op->update_key) {
2233                 struct btrfs_tree_block_info *bi;
2234                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2235                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2236                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2237         }
2238 }
2239
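/*
 * Run a delayed extent op against the extent tree: locate the extent
 * item, trying the skinny metadata key first and falling back to the
 * old fat key, then apply the update with __run_delayed_extent_op().
 */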
2240 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2241                                  struct btrfs_root *root,
2242                                  struct btrfs_delayed_ref_node *node,
2243                                  struct btrfs_delayed_extent_op *extent_op)
2244 {
2245         struct btrfs_key key;
2246         struct btrfs_path *path;
2247         struct btrfs_extent_item *ei;
2248         struct extent_buffer *leaf;
2249         u32 item_size;
2250         int ret;
2251         int err = 0;
2252         int metadata = !extent_op->is_data;
2253
2254         if (trans->aborted)
2255                 return 0;
2256
2257         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2258                 metadata = 0;
2259
2260         path = btrfs_alloc_path();
2261         if (!path)
2262                 return -ENOMEM;
2263
2264         key.objectid = node->bytenr;
2265
2266         if (metadata) {
2267                 key.type = BTRFS_METADATA_ITEM_KEY;
2268                 key.offset = extent_op->level;
2269         } else {
2270                 key.type = BTRFS_EXTENT_ITEM_KEY;
2271                 key.offset = node->num_bytes;
2272         }
2273
2274 again:
2275         path->reada = READA_FORWARD;
2276         path->leave_spinning = 1;
2277         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2278                                 path, 0, 1);
2279         if (ret < 0) {
2280                 err = ret;
2281                 goto out;
2282         }
2283         if (ret > 0) {
2284                 if (metadata) {
2285                         if (path->slots[0] > 0) {
2286                                 path->slots[0]--;
2287                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2288                                                       path->slots[0]);
2289                                 if (key.objectid == node->bytenr &&
2290                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2291                                     key.offset == node->num_bytes)
2292                                         ret = 0;
2293                         }
2294                         if (ret > 0) {
2295                                 btrfs_release_path(path);
2296                                 metadata = 0;
2297
2298                                 key.objectid = node->bytenr;
2299                                 key.offset = node->num_bytes;
2300                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2301                                 goto again;
2302                         }
2303                 } else {
2304                         err = -EIO;
2305                         goto out;
2306                 }
2307         }
2308
2309         leaf = path->nodes[0];
2310         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2311 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2312         if (item_size < sizeof(*ei)) {
2313                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2314                                              path, (u64)-1, 0);
2315                 if (ret < 0) {
2316                         err = ret;
2317                         goto out;
2318                 }
2319                 leaf = path->nodes[0];
2320                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2321         }
2322 #endif
2323         BUG_ON(item_size < sizeof(*ei));
2324         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2325         __run_delayed_extent_op(extent_op, leaf, ei);
2326
2327         btrfs_mark_buffer_dirty(leaf);
2328 out:
2329         btrfs_free_path(path);
2330         return err;
2331 }
2332
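/*
 * Apply one delayed tree ref: insert the reserved tree block, or
 * add/drop a single reference on an existing block, depending on the
 * node's action.  Tree refs always carry a ref_mod of 1.
 */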
2333 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2334                                 struct btrfs_root *root,
2335                                 struct btrfs_delayed_ref_node *node,
2336                                 struct btrfs_delayed_extent_op *extent_op,
2337                                 int insert_reserved)
2338 {
2339         int ret = 0;
2340         struct btrfs_delayed_tree_ref *ref;
2341         struct btrfs_key ins;
2342         u64 parent = 0;
2343         u64 ref_root = 0;
2344         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2345                                                  SKINNY_METADATA);
2346
2347         ref = btrfs_delayed_node_to_tree_ref(node);
2348         trace_run_delayed_tree_ref(root->fs_info, node, ref, node->action);
2349
2350         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2351                 parent = ref->parent;
2352         ref_root = ref->root;
2353
2354         ins.objectid = node->bytenr;
2355         if (skinny_metadata) {
2356                 ins.offset = ref->level;
2357                 ins.type = BTRFS_METADATA_ITEM_KEY;
2358         } else {
2359                 ins.offset = node->num_bytes;
2360                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2361         }
2362
2363         BUG_ON(node->ref_mod != 1);
2364         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2365                 BUG_ON(!extent_op || !extent_op->update_flags);
2366                 ret = alloc_reserved_tree_block(trans, root,
2367                                                 parent, ref_root,
2368                                                 extent_op->flags_to_set,
2369                                                 &extent_op->key,
2370                                                 ref->level, &ins);
2371         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2372                 ret = __btrfs_inc_extent_ref(trans, root, node,
2373                                              parent, ref_root,
2374                                              ref->level, 0, 1,
2375                                              extent_op);
2376         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2377                 ret = __btrfs_free_extent(trans, root, node,
2378                                           parent, ref_root,
2379                                           ref->level, 0, 1, extent_op);
2380         } else {
2381                 BUG();
2382         }
2383         return ret;
2384 }
2385
2386 /* helper function to actually process a single delayed ref entry */
2387 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2388                                struct btrfs_root *root,
2389                                struct btrfs_delayed_ref_node *node,
2390                                struct btrfs_delayed_extent_op *extent_op,
2391                                int insert_reserved)
2392 {
2393         int ret = 0;
2394
2395         if (trans->aborted) {
2396                 if (insert_reserved)
2397                         btrfs_pin_extent(root, node->bytenr,
2398                                          node->num_bytes, 1);
2399                 return 0;
2400         }
2401
2402         if (btrfs_delayed_ref_is_head(node)) {
2403                 struct btrfs_delayed_ref_head *head;
2404                 /*
2405                  * we've hit the end of the chain and we were supposed
2406                  * to insert this extent into the tree.  But it got
2407                  * deleted before we ever needed to insert it, so all
2408                  * we have to do is clean up the accounting.
2409                  */
2410                 BUG_ON(extent_op);
2411                 head = btrfs_delayed_node_to_head(node);
2412                 trace_run_delayed_ref_head(root->fs_info, node, head,
2413                                            node->action);
2414
2415                 if (insert_reserved) {
2416                         btrfs_pin_extent(root, node->bytenr,
2417                                          node->num_bytes, 1);
2418                         if (head->is_data) {
2419                                 ret = btrfs_del_csums(trans, root,
2420                                                       node->bytenr,
2421                                                       node->num_bytes);
2422                         }
2423                 }
2424
2425                 /* Also free its reserved qgroup space */
2426                 btrfs_qgroup_free_delayed_ref(root->fs_info,
2427                                               head->qgroup_ref_root,
2428                                               head->qgroup_reserved);
2429                 return ret;
2430         }
2431
2432         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2433             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2434                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2435                                            insert_reserved);
2436         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2437                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2438                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2439                                            insert_reserved);
2440         else
2441                 BUG();
2442         return ret;
2443 }
2444
2445 static inline struct btrfs_delayed_ref_node *
2446 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2447 {
2448         struct btrfs_delayed_ref_node *ref;
2449
2450         if (list_empty(&head->ref_list))
2451                 return NULL;
2452
2453         /*
2454          * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2455          * This is to prevent a ref count from going down to zero, which deletes
2456          * the extent item from the extent tree, when there still are references
2457          * to add, which would fail because they would not find the extent item.
2458          */
2459         list_for_each_entry(ref, &head->ref_list, list) {
2460                 if (ref->action == BTRFS_ADD_DELAYED_REF)
2461                         return ref;
2462         }
2463
2464         return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
2465                           list);
2466 }
2467
2468 /*
2469  * Returns 0 on success or if called with an already aborted transaction.
2470  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2471  */
2472 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2473                                              struct btrfs_root *root,
2474                                              unsigned long nr)
2475 {
2476         struct btrfs_delayed_ref_root *delayed_refs;
2477         struct btrfs_delayed_ref_node *ref;
2478         struct btrfs_delayed_ref_head *locked_ref = NULL;
2479         struct btrfs_delayed_extent_op *extent_op;
2480         struct btrfs_fs_info *fs_info = root->fs_info;
2481         ktime_t start = ktime_get();
2482         int ret;
2483         unsigned long count = 0;
2484         unsigned long actual_count = 0;
2485         int must_insert_reserved = 0;
2486
2487         delayed_refs = &trans->transaction->delayed_refs;
2488         while (1) {
2489                 if (!locked_ref) {
2490                         if (count >= nr)
2491                                 break;
2492
2493                         spin_lock(&delayed_refs->lock);
2494                         locked_ref = btrfs_select_ref_head(trans);
2495                         if (!locked_ref) {
2496                                 spin_unlock(&delayed_refs->lock);
2497                                 break;
2498                         }
2499
2500                         /* grab the lock that says we are going to process
2501                          * all the refs for this head */
2502                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2503                         spin_unlock(&delayed_refs->lock);
2504                         /*
2505                          * we may have dropped the spin lock to get the head
2506                          * mutex lock, and that might have given someone else
2507                          * time to free the head.  If that's true, it has been
2508                          * removed from our list and we can move on.
2509                          */
2510                         if (ret == -EAGAIN) {
2511                                 locked_ref = NULL;
2512                                 count++;
2513                                 continue;
2514                         }
2515                 }
2516
2517                 /*
2518                  * We need to try and merge add/drops of the same ref since we
2519                  * can run into issues with relocate dropping the implicit ref
2520                  * and then it being added back again before the drop can
2521                  * finish.  If we merged anything we need to re-loop so we can
2522                  * get a good ref.
2523                  * Or we can get node references of the same type that weren't
2524                  * merged when created due to bumps in the tree mod seq, and
2525                  * we need to merge them to prevent adding an inline extent
2526                  * backref before dropping it (triggering a BUG_ON at
2527                  * insert_inline_extent_backref()).
2528                  */
2529                 spin_lock(&locked_ref->lock);
2530                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2531                                          locked_ref);
2532
2533                 /*
2534                  * locked_ref is the head node, so we have to go one
2535                  * node back for any delayed ref updates
2536                  */
2537                 ref = select_delayed_ref(locked_ref);
2538
2539                 if (ref && ref->seq &&
2540                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2541                         spin_unlock(&locked_ref->lock);
2542                         btrfs_delayed_ref_unlock(locked_ref);
2543                         spin_lock(&delayed_refs->lock);
2544                         locked_ref->processing = 0;
2545                         delayed_refs->num_heads_ready++;
2546                         spin_unlock(&delayed_refs->lock);
2547                         locked_ref = NULL;
2548                         cond_resched();
2549                         count++;
2550                         continue;
2551                 }
2552
2553                 /*
2554                  * record the must insert reserved flag before we
2555                  * drop the spin lock.
2556                  */
2557                 must_insert_reserved = locked_ref->must_insert_reserved;
2558                 locked_ref->must_insert_reserved = 0;
2559
2560                 extent_op = locked_ref->extent_op;
2561                 locked_ref->extent_op = NULL;
2562
2563                 if (!ref) {
2566                         /* All delayed refs have been processed; go ahead
2567                          * and send the head node to run_one_delayed_ref,
2568                          * so that any accounting fixes can happen.
2569                          */
2570                         ref = &locked_ref->node;
2571
2572                         if (extent_op && must_insert_reserved) {
2573                                 btrfs_free_delayed_extent_op(extent_op);
2574                                 extent_op = NULL;
2575                         }
2576
2577                         if (extent_op) {
2578                                 spin_unlock(&locked_ref->lock);
2579                                 ret = run_delayed_extent_op(trans, root,
2580                                                             ref, extent_op);
2581                                 btrfs_free_delayed_extent_op(extent_op);
2582
2583                                 if (ret) {
2584                                         /*
2585                                          * Need to reset must_insert_reserved if
2586                                          * there was an error so the abort stuff
2587                                          * can cleanup the reserved space
2588                                          * properly.
2589                                          */
2590                                         if (must_insert_reserved)
2591                                                 locked_ref->must_insert_reserved = 1;
2592                                         locked_ref->processing = 0;
2593                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2594                                         btrfs_delayed_ref_unlock(locked_ref);
2595                                         return ret;
2596                                 }
2597                                 continue;
2598                         }
2599
2600                         /*
2601                          * Need to drop our head ref lock and re-acquire the
2602                          * delayed ref lock and then re-check to make sure
2603                          * nobody got added.
2604                          */
2605                         spin_unlock(&locked_ref->lock);
2606                         spin_lock(&delayed_refs->lock);
2607                         spin_lock(&locked_ref->lock);
2608                         if (!list_empty(&locked_ref->ref_list) ||
2609                             locked_ref->extent_op) {
2610                                 spin_unlock(&locked_ref->lock);
2611                                 spin_unlock(&delayed_refs->lock);
2612                                 continue;
2613                         }
2614                         ref->in_tree = 0;
2615                         delayed_refs->num_heads--;
2616                         rb_erase(&locked_ref->href_node,
2617                                  &delayed_refs->href_root);
2618                         spin_unlock(&delayed_refs->lock);
2619                 } else {
2620                         actual_count++;
2621                         ref->in_tree = 0;
2622                         list_del(&ref->list);
2623                 }
2624                 atomic_dec(&delayed_refs->num_entries);
2625
2626                 if (!btrfs_delayed_ref_is_head(ref)) {
2627                         /*
2628                          * when we play the delayed ref, also correct the
2629                          * ref_mod on head
2630                          */
2631                         switch (ref->action) {
2632                         case BTRFS_ADD_DELAYED_REF:
2633                         case BTRFS_ADD_DELAYED_EXTENT:
2634                                 locked_ref->node.ref_mod -= ref->ref_mod;
2635                                 break;
2636                         case BTRFS_DROP_DELAYED_REF:
2637                                 locked_ref->node.ref_mod += ref->ref_mod;
2638                                 break;
2639                         default:
2640                                 WARN_ON(1);
2641                         }
2642                 }
2643                 spin_unlock(&locked_ref->lock);
2644
2645                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2646                                           must_insert_reserved);
2647
2648                 btrfs_free_delayed_extent_op(extent_op);
2649                 if (ret) {
2650                         locked_ref->processing = 0;
2651                         btrfs_delayed_ref_unlock(locked_ref);
2652                         btrfs_put_delayed_ref(ref);
2653                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2654                         return ret;
2655                 }
2656
2657                 /*
2658                  * If this node is a head, that means all the refs in this head
2659                  * have been dealt with, and we will pick the next head to deal
2660                  * with, so we must unlock the head and drop it from the cluster
2661                  * list before we release it.
2662                  */
2663                 if (btrfs_delayed_ref_is_head(ref)) {
2664                         if (locked_ref->is_data &&
2665                             locked_ref->total_ref_mod < 0) {
2666                                 spin_lock(&delayed_refs->lock);
2667                                 delayed_refs->pending_csums -= ref->num_bytes;
2668                                 spin_unlock(&delayed_refs->lock);
2669                         }
2670                         btrfs_delayed_ref_unlock(locked_ref);
2671                         locked_ref = NULL;
2672                 }
2673                 btrfs_put_delayed_ref(ref);
2674                 count++;
2675                 cond_resched();
2676         }
2677
2678         /*
2679          * We don't want to include ref heads, since we can have empty ref heads
2680          * and those would drastically skew our runtime down: for them we only do
2681          * accounting, no actual extent tree updates.
2682          */
2683         if (actual_count > 0) {
2684                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2685                 u64 avg;
2686
2687                 /*
2688                  * We weigh the current average higher than our current runtime
2689                  * to avoid large swings in the average.
2690                  */
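                /* i.e. new_avg = (3 * old_avg + runtime) / 4 */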
2691                 spin_lock(&delayed_refs->lock);
2692                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2693                 fs_info->avg_delayed_ref_runtime = avg >> 2;    /* div by 4 */
2694                 spin_unlock(&delayed_refs->lock);
2695         }
2696         return 0;
2697 }
2698
2699 #ifdef SCRAMBLE_DELAYED_REFS
2700 /*
2701  * Normally delayed refs get processed in ascending bytenr order. This
2702  * correlates in most cases to the order added. To expose dependencies on this
2703  * order, we start to process the tree in the middle instead of the beginning
2704  */
2705 static u64 find_middle(struct rb_root *root)
2706 {
2707         struct rb_node *n = root->rb_node;
2708         struct btrfs_delayed_ref_node *entry;
2709         int alt = 1;
2710         u64 middle;
2711         u64 first = 0, last = 0;
2712
2713         n = rb_first(root);
2714         if (n) {
2715                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2716                 first = entry->bytenr;
2717         }
2718         n = rb_last(root);
2719         if (n) {
2720                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2721                 last = entry->bytenr;
2722         }
2723         n = root->rb_node;
2724
2725         while (n) {
2726                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2727                 WARN_ON(!entry->in_tree);
2728
2729                 middle = entry->bytenr;
2730
2731                 if (alt)
2732                         n = n->rb_left;
2733                 else
2734                         n = n->rb_right;
2735
2736                 alt = 1 - alt;
2737         }
2738         return middle;
2739 }
2740 #endif
2741
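/*
 * Rough estimate of how many extent tree leaves the given number of
 * delayed ref heads will dirty: one extent item plus one inline ref per
 * head, plus a tree block info item when skinny metadata is not enabled.
 */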
2742 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2743 {
2744         u64 num_bytes;
2745
2746         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2747                              sizeof(struct btrfs_extent_inline_ref));
2748         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2749                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2750
2751         /*
2752          * We don't ever fill up leaves all the way, so the caller doubles the
2753          * result (see btrfs_check_space_for_delayed_refs) to better match real use.
2754          */
2755         return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2756 }
2757
2758 /*
2759  * Takes the number of bytes to be checksummed and figures out how many
2760  * leaves would be required to store the csums for that many bytes.
2761  */
2762 u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
2763 {
2764         u64 csum_size;
2765         u64 num_csums_per_leaf;
2766         u64 num_csums;
2767
2768         csum_size = BTRFS_MAX_ITEM_SIZE(root);
2769         num_csums_per_leaf = div64_u64(csum_size,
2770                         (u64)btrfs_super_csum_size(root->fs_info->super_copy));
2771         num_csums = div64_u64(csum_bytes, root->sectorsize);
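        /*
         * Round up: equivalent to DIV_ROUND_UP(num_csums, num_csums_per_leaf).
         * E.g. assuming 4KiB sectors, 4-byte crc32c csums and ~16KiB leaves,
         * one leaf holds roughly 4000 csums, i.e. covers ~16MiB of data.
         */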
2772         num_csums += num_csums_per_leaf - 1;
2773         num_csums = div64_u64(num_csums, num_csums_per_leaf);
2774         return num_csums;
2775 }
2776
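/*
 * Best-effort check of whether the global reserve is big enough to run
 * the currently pending delayed refs and dirty block groups.  Returns 1
 * if the caller should back off, 0 otherwise.  The estimate is
 * intentionally pessimistic (and doubled when no more chunks can be
 * allocated), since running delayed refs can itself create delayed refs.
 */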
2777 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2778                                        struct btrfs_root *root)
2779 {
2780         struct btrfs_block_rsv *global_rsv;
2781         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2782         u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
2783         u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
2784         u64 num_bytes, num_dirty_bgs_bytes;
2785         int ret = 0;
2786
2787         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2788         num_heads = heads_to_leaves(root, num_heads);
2789         if (num_heads > 1)
2790                 num_bytes += (num_heads - 1) * root->nodesize;
2791         num_bytes <<= 1;
2792         num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
2793         num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
2794                                                              num_dirty_bgs);
2795         global_rsv = &root->fs_info->global_block_rsv;
2796
2797         /*
2798          * If we can't allocate any more chunks, let's make sure we have _lots_ of
2799          * wiggle room since running delayed refs can create more delayed refs.
2800          */
2801         if (global_rsv->space_info->full) {
2802                 num_dirty_bgs_bytes <<= 1;
2803                 num_bytes <<= 1;
2804         }
2805
2806         spin_lock(&global_rsv->lock);
2807         if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
2808                 ret = 1;
2809         spin_unlock(&global_rsv->lock);
2810         return ret;
2811 }
2812
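/*
 * Estimate how long running the queued delayed refs would take based on
 * the moving average cost per ref: returns 1 when the estimate reaches
 * one second, 2 at half a second, and otherwise defers to the reserve
 * space check above.
 */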
2813 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2814                                        struct btrfs_root *root)
2815 {
2816         struct btrfs_fs_info *fs_info = root->fs_info;
2817         u64 num_entries =
2818                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2819         u64 avg_runtime;
2820         u64 val;
2821
2822         smp_mb();
2823         avg_runtime = fs_info->avg_delayed_ref_runtime;
2824         val = num_entries * avg_runtime;
2825         if (val >= NSEC_PER_SEC)
2826                 return 1;
2827         if (val >= NSEC_PER_SEC / 2)
2828                 return 2;
2829
2830         return btrfs_check_space_for_delayed_refs(trans, root);
2831 }
2832
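/*
 * Context for running delayed refs from a helper thread, see
 * btrfs_async_run_delayed_refs().  When 'sync' is set the submitter
 * waits on 'wait' and frees the struct, otherwise the worker frees it.
 */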
2833 struct async_delayed_refs {
2834         struct btrfs_root *root;
2835         u64 transid;
2836         int count;
2837         int error;
2838         int sync;
2839         struct completion wait;
2840         struct btrfs_work work;
2841 };
2842
2843 static void delayed_ref_async_start(struct btrfs_work *work)
2844 {
2845         struct async_delayed_refs *async;
2846         struct btrfs_trans_handle *trans;
2847         int ret;
2848
2849         async = container_of(work, struct async_delayed_refs, work);
2850
2851         /* if the commit is already started, we don't need to wait here */
2852         if (btrfs_transaction_blocked(async->root->fs_info))
2853                 goto done;
2854
2855         trans = btrfs_join_transaction(async->root);
2856         if (IS_ERR(trans)) {
2857                 async->error = PTR_ERR(trans);
2858                 goto done;
2859         }
2860
2861         /*
2862          * trans->sync means that when we call end_transaction, we won't
2863          * wait on delayed refs
2864          */
2865         trans->sync = true;
2866
2867         /* Don't bother flushing if we got into a different transaction */
2868         if (trans->transid > async->transid)
2869                 goto end;
2870
2871         ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2872         if (ret)
2873                 async->error = ret;
2874 end:
2875         ret = btrfs_end_transaction(trans, async->root);
2876         if (ret && !async->error)
2877                 async->error = ret;
2878 done:
2879         if (async->sync)
2880                 complete(&async->wait);
2881         else
2882                 kfree(async);
2883 }
2884
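/*
 * Queue a worker to run 'count' delayed refs against the transaction
 * identified by 'transid'.  With 'wait' set, block until the worker
 * finishes and return its error; otherwise return immediately.
 */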
2885 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2886                                  unsigned long count, u64 transid, int wait)
2887 {
2888         struct async_delayed_refs *async;
2889         int ret;
2890
2891         async = kmalloc(sizeof(*async), GFP_NOFS);
2892         if (!async)
2893                 return -ENOMEM;
2894
2895         async->root = root->fs_info->tree_root;
2896         async->count = count;
2897         async->error = 0;
2898         async->transid = transid;
2899         if (wait)
2900                 async->sync = 1;
2901         else
2902                 async->sync = 0;
2903         init_completion(&async->wait);
2904
2905         btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2906                         delayed_ref_async_start, NULL, NULL);
2907
2908         btrfs_queue_work(root->fs_info->extent_workers, &async->work);
2909
2910         if (wait) {
2911                 wait_for_completion(&async->wait);
2912                 ret = async->error;
2913                 kfree(async);
2914                 return ret;
2915         }
2916         return 0;
2917 }
2918
2919 /*
2920  * This starts processing the delayed reference count updates and
2921  * extent insertions we have queued up so far.  count can be
2922  * 0, which means to process everything in the tree at the start
2923  * of the run (but not newly added entries), or it can be some target
2924  * number you'd like to process.
2925  *
2926  * Returns 0 on success or if called with an aborted transaction.
2927  * Returns <0 on error and aborts the transaction.
2928  */
2929 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2930                            struct btrfs_root *root, unsigned long count)
2931 {
2932         struct rb_node *node;
2933         struct btrfs_delayed_ref_root *delayed_refs;
2934         struct btrfs_delayed_ref_head *head;
2935         int ret;
2936         int run_all = count == (unsigned long)-1;
2937         bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
2938
2939         /* We'll clean this up in btrfs_cleanup_transaction */
2940         if (trans->aborted)
2941                 return 0;
2942
2943         if (root->fs_info->creating_free_space_tree)
2944                 return 0;
2945
2946         if (root == root->fs_info->extent_root)
2947                 root = root->fs_info->tree_root;
2948
2949         delayed_refs = &trans->transaction->delayed_refs;
2950         if (count == 0)
2951                 count = atomic_read(&delayed_refs->num_entries) * 2;
2952
2953 again:
2954 #ifdef SCRAMBLE_DELAYED_REFS
2955         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2956 #endif
2957         trans->can_flush_pending_bgs = false;
2958         ret = __btrfs_run_delayed_refs(trans, root, count);
2959         if (ret < 0) {
2960                 btrfs_abort_transaction(trans, ret);
2961                 return ret;
2962         }
2963
2964         if (run_all) {
2965                 if (!list_empty(&trans->new_bgs))
2966                         btrfs_create_pending_block_groups(trans, root);
2967
2968                 spin_lock(&delayed_refs->lock);
2969                 node = rb_first(&delayed_refs->href_root);
2970                 if (!node) {
2971                         spin_unlock(&delayed_refs->lock);
2972                         goto out;
2973                 }
2974                 count = (unsigned long)-1;
2975
2976                 while (node) {
2977                         head = rb_entry(node, struct btrfs_delayed_ref_head,
2978                                         href_node);
2979                         if (btrfs_delayed_ref_is_head(&head->node)) {
2980                                 struct btrfs_delayed_ref_node *ref;
2981
2982                                 ref = &head->node;
2983                                 atomic_inc(&ref->refs);
2984
2985                                 spin_unlock(&delayed_refs->lock);
2986                                 /*
2987                                  * Mutex was contended, block until it's
2988                                  * released and try again
2989                                  */
2990                                 mutex_lock(&head->mutex);
2991                                 mutex_unlock(&head->mutex);
2992
2993                                 btrfs_put_delayed_ref(ref);
2994                                 cond_resched();
2995                                 goto again;
2996                         } else {
2997                                 WARN_ON(1);
2998                         }
2999                         node = rb_next(node);
3000                 }
3001                 spin_unlock(&delayed_refs->lock);
3002                 cond_resched();
3003                 goto again;
3004         }
3005 out:
3006         assert_qgroups_uptodate(trans);
3007         trans->can_flush_pending_bgs = can_flush_pending_bgs;
3008         return 0;
3009 }
3010
3011 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
3012                                 struct btrfs_root *root,
3013                                 u64 bytenr, u64 num_bytes, u64 flags,
3014                                 int level, int is_data)
3015 {
3016         struct btrfs_delayed_extent_op *extent_op;
3017         int ret;
3018
3019         extent_op = btrfs_alloc_delayed_extent_op();
3020         if (!extent_op)
3021                 return -ENOMEM;
3022
3023         extent_op->flags_to_set = flags;
3024         extent_op->update_flags = true;
3025         extent_op->update_key = false;
3026         extent_op->is_data = is_data ? true : false;
3027         extent_op->level = level;
3028
3029         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
3030                                           num_bytes, extent_op);
3031         if (ret)
3032                 btrfs_free_delayed_extent_op(extent_op);
3033         return ret;
3034 }
3035
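/*
 * Look for a cross reference to (objectid, offset) for 'bytenr' among
 * the delayed refs that have not been run yet.  Returns 1 if one may
 * exist, 0 if none was found, and -EAGAIN if the head mutex was
 * contended and the caller should retry.
 */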
3036 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
3037                                       struct btrfs_root *root,
3038                                       struct btrfs_path *path,
3039                                       u64 objectid, u64 offset, u64 bytenr)
3040 {
3041         struct btrfs_delayed_ref_head *head;
3042         struct btrfs_delayed_ref_node *ref;
3043         struct btrfs_delayed_data_ref *data_ref;
3044         struct btrfs_delayed_ref_root *delayed_refs;
3045         int ret = 0;
3046
3047         delayed_refs = &trans->transaction->delayed_refs;
3048         spin_lock(&delayed_refs->lock);
3049         head = btrfs_find_delayed_ref_head(trans, bytenr);
3050         if (!head) {
3051                 spin_unlock(&delayed_refs->lock);
3052                 return 0;
3053         }
3054
3055         if (!mutex_trylock(&head->mutex)) {
3056                 atomic_inc(&head->node.refs);
3057                 spin_unlock(&delayed_refs->lock);
3058
3059                 btrfs_release_path(path);
3060
3061                 /*
3062                  * Mutex was contended, block until it's released and let
3063                  * caller try again
3064                  */
3065                 mutex_lock(&head->mutex);
3066                 mutex_unlock(&head->mutex);
3067                 btrfs_put_delayed_ref(&head->node);
3068                 return -EAGAIN;
3069         }
3070         spin_unlock(&delayed_refs->lock);
3071
3072         spin_lock(&head->lock);
3073         list_for_each_entry(ref, &head->ref_list, list) {
3074                 /* If it's a shared ref we know a cross reference exists */
3075                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
3076                         ret = 1;
3077                         break;
3078                 }
3079
3080                 data_ref = btrfs_delayed_node_to_data_ref(ref);
3081
3082                 /*
3083                  * If our ref doesn't match the one we're currently looking at
3084                  * then we have a cross reference.
3085                  */
3086                 if (data_ref->root != root->root_key.objectid ||
3087                     data_ref->objectid != objectid ||
3088                     data_ref->offset != offset) {
3089                         ret = 1;
3090                         break;
3091                 }
3092         }
3093         spin_unlock(&head->lock);
3094         mutex_unlock(&head->mutex);
3095         return ret;
3096 }
3097
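/*
 * The same check against the committed extent tree: returns 0 only when
 * the extent is provably not shared (a single inline data ref matching
 * root/objectid/offset and no snapshot taken since it was written),
 * -ENOENT when no extent item was found, and 1 when sharing cannot be
 * ruled out.
 */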
3098 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
3099                                         struct btrfs_root *root,
3100                                         struct btrfs_path *path,
3101                                         u64 objectid, u64 offset, u64 bytenr)
3102 {
3103         struct btrfs_root *extent_root = root->fs_info->extent_root;
3104         struct extent_buffer *leaf;
3105         struct btrfs_extent_data_ref *ref;
3106         struct btrfs_extent_inline_ref *iref;
3107         struct btrfs_extent_item *ei;
3108         struct btrfs_key key;
3109         u32 item_size;
3110         int ret;
3111
3112         key.objectid = bytenr;
3113         key.offset = (u64)-1;
3114         key.type = BTRFS_EXTENT_ITEM_KEY;
3115
3116         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3117         if (ret < 0)
3118                 goto out;
3119         BUG_ON(ret == 0); /* Corruption */
3120
3121         ret = -ENOENT;
3122         if (path->slots[0] == 0)
3123                 goto out;
3124
3125         path->slots[0]--;
3126         leaf = path->nodes[0];
3127         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3128
3129         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
3130                 goto out;
3131
3132         ret = 1;
3133         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3134 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3135         if (item_size < sizeof(*ei)) {
3136                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
3137                 goto out;
3138         }
3139 #endif
3140         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
3141
3142         if (item_size != sizeof(*ei) +
3143             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
3144                 goto out;
3145
3146         if (btrfs_extent_generation(leaf, ei) <=
3147             btrfs_root_last_snapshot(&root->root_item))
3148                 goto out;
3149
3150         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
3151         if (btrfs_extent_inline_ref_type(leaf, iref) !=
3152             BTRFS_EXTENT_DATA_REF_KEY)
3153                 goto out;
3154
3155         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
3156         if (btrfs_extent_refs(leaf, ei) !=
3157             btrfs_extent_data_ref_count(leaf, ref) ||
3158             btrfs_extent_data_ref_root(leaf, ref) !=
3159             root->root_key.objectid ||
3160             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3161             btrfs_extent_data_ref_offset(leaf, ref) != offset)
3162                 goto out;
3163
3164         ret = 0;
3165 out:
3166         return ret;
3167 }
3168
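/*
 * Returns 0 when no other root can reference the given data extent and
 * nonzero otherwise (a possible cross reference, or an error).  Both the
 * committed tree and the pending delayed refs are checked, retrying the
 * delayed ref check while its head mutex is contended.
 */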
3169 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3170                           struct btrfs_root *root,
3171                           u64 objectid, u64 offset, u64 bytenr)
3172 {
3173         struct btrfs_path *path;
3174         int ret;
3175         int ret2;
3176
3177         path = btrfs_alloc_path();
3178         if (!path)
3179                 return -ENOMEM;
3180
3181         do {
3182                 ret = check_committed_ref(trans, root, path, objectid,
3183                                           offset, bytenr);
3184                 if (ret && ret != -ENOENT)
3185                         goto out;
3186
3187                 ret2 = check_delayed_ref(trans, root, path, objectid,
3188                                          offset, bytenr);
3189         } while (ret2 == -EAGAIN);
3190
3191         if (ret2 && ret2 != -ENOENT) {
3192                 ret = ret2;
3193                 goto out;
3194         }
3195
3196         if (ret != -ENOENT || ret2 != -ENOENT)
3197                 ret = 0;
3198 out:
3199         btrfs_free_path(path);
3200         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3201                 WARN_ON(ret > 0);
3202         return ret;
3203 }
3204
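/*
 * Add or drop (depending on 'inc') one reference for everything the
 * given tree block points to: the data extents of a leaf, or the child
 * blocks of a node.  'full_backref' keys the refs on this block (shared
 * backrefs) instead of on the owning root.
 */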
3205 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3206                            struct btrfs_root *root,
3207                            struct extent_buffer *buf,
3208                            int full_backref, int inc)
3209 {
3210         u64 bytenr;
3211         u64 num_bytes;
3212         u64 parent;
3213         u64 ref_root;
3214         u32 nritems;
3215         struct btrfs_key key;
3216         struct btrfs_file_extent_item *fi;
3217         int i;
3218         int level;
3219         int ret = 0;
3220         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3221                             u64, u64, u64, u64, u64, u64);
3222
3224         if (btrfs_is_testing(root->fs_info))
3225                 return 0;
3226
3227         ref_root = btrfs_header_owner(buf);
3228         nritems = btrfs_header_nritems(buf);
3229         level = btrfs_header_level(buf);
3230
3231         if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3232                 return 0;
3233
3234         if (inc)
3235                 process_func = btrfs_inc_extent_ref;
3236         else
3237                 process_func = btrfs_free_extent;
3238
3239         if (full_backref)
3240                 parent = buf->start;
3241         else
3242                 parent = 0;
3243
3244         for (i = 0; i < nritems; i++) {
3245                 if (level == 0) {
3246                         btrfs_item_key_to_cpu(buf, &key, i);
3247                         if (key.type != BTRFS_EXTENT_DATA_KEY)
3248                                 continue;
3249                         fi = btrfs_item_ptr(buf, i,
3250                                             struct btrfs_file_extent_item);
3251                         if (btrfs_file_extent_type(buf, fi) ==
3252                             BTRFS_FILE_EXTENT_INLINE)
3253                                 continue;
3254                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3255                         if (bytenr == 0)
3256                                 continue;
3257
3258                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3259                         key.offset -= btrfs_file_extent_offset(buf, fi);
3260                         ret = process_func(trans, root, bytenr, num_bytes,
3261                                            parent, ref_root, key.objectid,
3262                                            key.offset);
3263                         if (ret)
3264                                 goto fail;
3265                 } else {
3266                         bytenr = btrfs_node_blockptr(buf, i);
3267                         num_bytes = root->nodesize;
3268                         ret = process_func(trans, root, bytenr, num_bytes,
3269                                            parent, ref_root, level - 1, 0);
3270                         if (ret)
3271                                 goto fail;
3272                 }
3273         }
3274         return 0;
3275 fail:
3276         return ret;
3277 }
3278
3279 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3280                   struct extent_buffer *buf, int full_backref)
3281 {
3282         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3283 }
3284
3285 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3286                   struct extent_buffer *buf, int full_backref)
3287 {
3288         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3289 }
3290
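/*
 * Copy the in-memory block group item for 'cache' into its slot in the
 * extent tree.  Returns -ENOENT when the item isn't there yet, which the
 * callers handle (the block group may still be attached to a transaction
 * handle's list of new block groups).
 */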
3291 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3292                                  struct btrfs_root *root,
3293                                  struct btrfs_path *path,
3294                                  struct btrfs_block_group_cache *cache)
3295 {
3296         int ret;
3297         struct btrfs_root *extent_root = root->fs_info->extent_root;
3298         unsigned long bi;
3299         struct extent_buffer *leaf;
3300
3301         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3302         if (ret) {
3303                 if (ret > 0)
3304                         ret = -ENOENT;
3305                 goto fail;
3306         }
3307
3308         leaf = path->nodes[0];
3309         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3310         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3311         btrfs_mark_buffer_dirty(leaf);
3312 fail:
3313         btrfs_release_path(path);
3314         return ret;
3316 }
3317
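/*
 * Return the block group that follows 'cache' in bytenr order, dropping
 * our reference on 'cache'.  Falls back to a full search when the
 * current group was removed from the rbtree while we held it.
 */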
3318 static struct btrfs_block_group_cache *
3319 next_block_group(struct btrfs_root *root,
3320                  struct btrfs_block_group_cache *cache)
3321 {
3322         struct rb_node *node;
3323
3324         spin_lock(&root->fs_info->block_group_cache_lock);
3325
3326         /* If our block group was removed, we need a full search. */
3327         if (RB_EMPTY_NODE(&cache->cache_node)) {
3328                 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3329
3330                 spin_unlock(&root->fs_info->block_group_cache_lock);
3331                 btrfs_put_block_group(cache);
3332                 cache = btrfs_lookup_first_block_group(root->fs_info,
3333                                                        next_bytenr);
3334                 return cache;
3335         }
3336         node = rb_next(&cache->cache_node);
3337         btrfs_put_block_group(cache);
3338         if (node) {
3339                 cache = rb_entry(node, struct btrfs_block_group_cache,
3340                                  cache_node);
3341                 btrfs_get_block_group(cache);
3342         } else
3343                 cache = NULL;
3344         spin_unlock(&root->fs_info->block_group_cache_lock);
3345         return cache;
3346 }
3347
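/*
 * Prepare the free space cache inode of a block group for this
 * transaction: drop any stale cache contents and preallocate room for
 * the new ones, recording the result in disk_cache_state.
 */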
3348 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3349                             struct btrfs_trans_handle *trans,
3350                             struct btrfs_path *path)
3351 {
3352         struct btrfs_root *root = block_group->fs_info->tree_root;
3353         struct inode *inode = NULL;
3354         u64 alloc_hint = 0;
3355         int dcs = BTRFS_DC_ERROR;
3356         u64 num_pages = 0;
3357         int retries = 0;
3358         int ret = 0;
3359
3360         /*
3361          * If this block group is smaller than 100 megs don't bother caching the
3362          * block group.
3363          */
3364         if (block_group->key.offset < (100 * SZ_1M)) {
3365                 spin_lock(&block_group->lock);
3366                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3367                 spin_unlock(&block_group->lock);
3368                 return 0;
3369         }
3370
3371         if (trans->aborted)
3372                 return 0;
3373 again:
3374         inode = lookup_free_space_inode(root, block_group, path);
3375         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3376                 ret = PTR_ERR(inode);
3377                 btrfs_release_path(path);
3378                 goto out;
3379         }
3380
3381         if (IS_ERR(inode)) {
3382                 BUG_ON(retries);
3383                 retries++;
3384
3385                 if (block_group->ro)
3386                         goto out_free;
3387
3388                 ret = create_free_space_inode(root, trans, block_group, path);
3389                 if (ret)
3390                         goto out_free;
3391                 goto again;
3392         }
3393
3394         /* We've already set up this transaction, go ahead and exit */
3395         if (block_group->cache_generation == trans->transid &&
3396             i_size_read(inode)) {
3397                 dcs = BTRFS_DC_SETUP;
3398                 goto out_put;
3399         }
3400
3401         /*
3402          * We want to set the generation to 0 so that if anything goes wrong
3403          * from here on out we know not to trust this cache when we load up next
3404          * time.
3405          */
3406         BTRFS_I(inode)->generation = 0;
3407         ret = btrfs_update_inode(trans, root, inode);
3408         if (ret) {
3409                 /*
3410                  * So theoretically we could recover from this, simply set the
3411                  * super cache generation to 0 so we know to invalidate the
3412                  * cache, but then we'd have to keep track of the block groups
3413                  * that fail this way so we know we _have_ to reset this cache
3414                  * before the next commit or risk reading stale cache.  So to
3415                  * limit our exposure to horrible edge cases, let's just abort the
3416                  * transaction; this only happens in really bad situations
3417                  * anyway.
3418                  */
3419                 btrfs_abort_transaction(trans, ret);
3420                 goto out_put;
3421         }
3422         WARN_ON(ret);
3423
3424         if (i_size_read(inode) > 0) {
3425                 ret = btrfs_check_trunc_cache_free_space(root,
3426                                         &root->fs_info->global_block_rsv);
3427                 if (ret)
3428                         goto out_put;
3429
3430                 ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
3431                 if (ret)
3432                         goto out_put;
3433         }
3434
3435         spin_lock(&block_group->lock);
3436         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3437             !btrfs_test_opt(root->fs_info, SPACE_CACHE)) {
3438                 /*
3439                  * don't bother trying to write stuff out _if_
3440                  * a) we're not cached,
3441                  * b) we're mounted with the nospace_cache option.
3442                  */
3443                 dcs = BTRFS_DC_WRITTEN;
3444                 spin_unlock(&block_group->lock);
3445                 goto out_put;
3446         }
3447         spin_unlock(&block_group->lock);
3448
3449         /*
3450          * We hit an ENOSPC when setting up the cache in this transaction, just
3451          * skip doing the setup, we've already cleared the cache so we're safe.
3452          */
3453         if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
3454                 ret = -ENOSPC;
3455                 goto out_put;
3456         }
3457
3458         /*
3459          * Try to preallocate enough space based on how big the block group is.
3460          * Keep in mind this has to include any pinned space which could end up
3461          * taking up quite a bit since it's not folded into the other space
3462          * cache.
3463          */
3464         num_pages = div_u64(block_group->key.offset, SZ_256M);
3465         if (!num_pages)
3466                 num_pages = 1;
3467
3468         num_pages *= 16;
3469         num_pages *= PAGE_SIZE;
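        /*
         * E.g. a 1GiB block group with 4KiB pages preallocates
         * 4 * 16 * 4KiB = 256KiB of cache space.
         */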
3470
3471         ret = btrfs_check_data_free_space(inode, 0, num_pages);
3472         if (ret)
3473                 goto out_put;
3474
3475         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3476                                               num_pages, num_pages,
3477                                               &alloc_hint);
3478         /*
3479          * Our cache requires contiguous chunks so that we don't modify a bunch
3480          * of metadata or split extents when writing the cache out, which means
3481                  * we can hit ENOSPC if we are heavily fragmented in addition to normal
3482          * out of space conditions.  So if we hit this just skip setting up any
3483          * other block groups for this transaction, maybe we'll unpin enough
3484          * space the next time around.
3485          */
3486         if (!ret)
3487                 dcs = BTRFS_DC_SETUP;
3488         else if (ret == -ENOSPC)
3489                 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
3490
3491 out_put:
3492         iput(inode);
3493 out_free:
3494         btrfs_release_path(path);
3495 out:
3496         spin_lock(&block_group->lock);
3497         if (!ret && dcs == BTRFS_DC_SETUP)
3498                 block_group->cache_generation = trans->transid;
3499         block_group->disk_cache_state = dcs;
3500         spin_unlock(&block_group->lock);
3501
3502         return ret;
3503 }
3504
3505 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3506                             struct btrfs_root *root)
3507 {
3508         struct btrfs_block_group_cache *cache, *tmp;
3509         struct btrfs_transaction *cur_trans = trans->transaction;
3510         struct btrfs_path *path;
3511
3512         if (list_empty(&cur_trans->dirty_bgs) ||
3513             !btrfs_test_opt(root->fs_info, SPACE_CACHE))
3514                 return 0;
3515
3516         path = btrfs_alloc_path();
3517         if (!path)
3518                 return -ENOMEM;
3519
3520         /* Could add new block groups, use _safe just in case */
3521         list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3522                                  dirty_list) {
3523                 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3524                         cache_save_setup(cache, trans, path);
3525         }
3526
3527         btrfs_free_path(path);
3528         return 0;
3529 }
3530
3531 /*
3532  * transaction commit does final block group cache writeback during a
3533  * critical section where nothing is allowed to change the FS.  This is
3534  * required in order for the cache to actually match the block group,
3535  * but can introduce a lot of latency into the commit.
3536  *
3537  * So, btrfs_start_dirty_block_groups is here to kick off block group
3538  * cache IO.  There's a chance we'll have to redo some of it if the
3539  * block group changes again during the commit, but it greatly reduces
3540  * the commit latency by getting rid of the easy block groups while
3541  * we're still allowing others to join the commit.
3542  */
3543 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
3544                                    struct btrfs_root *root)
3545 {
3546         struct btrfs_block_group_cache *cache;
3547         struct btrfs_transaction *cur_trans = trans->transaction;
3548         int ret = 0;
3549         int should_put;
3550         struct btrfs_path *path = NULL;
3551         LIST_HEAD(dirty);
3552         struct list_head *io = &cur_trans->io_bgs;
3553         int num_started = 0;
3554         int loops = 0;
3555
3556         spin_lock(&cur_trans->dirty_bgs_lock);
3557         if (list_empty(&cur_trans->dirty_bgs)) {
3558                 spin_unlock(&cur_trans->dirty_bgs_lock);
3559                 return 0;
3560         }
3561         list_splice_init(&cur_trans->dirty_bgs, &dirty);
3562         spin_unlock(&cur_trans->dirty_bgs_lock);
3563
3564 again:
3565         /*
3566          * make sure all the block groups on our dirty list actually
3567          * exist
3568          */
3569         btrfs_create_pending_block_groups(trans, root);
3570
3571         if (!path) {
3572                 path = btrfs_alloc_path();
3573                 if (!path)
3574                         return -ENOMEM;
3575         }
3576
3577         /*
3578          * cache_write_mutex is here only to save us from balance or automatic
3579          * removal of empty block groups deleting this block group while we are
3580          * writing out the cache
3581          */
3582         mutex_lock(&trans->transaction->cache_write_mutex);
3583         while (!list_empty(&dirty)) {
3584                 cache = list_first_entry(&dirty,
3585                                          struct btrfs_block_group_cache,
3586                                          dirty_list);
3587                 /*
3588                  * this can happen if something re-dirties a block
3589                  * group that is already under IO.  Just wait for it to
3590                  * finish and then do it all again
3591                  */
3592                 if (!list_empty(&cache->io_list)) {
3593                         list_del_init(&cache->io_list);
3594                         btrfs_wait_cache_io(root, trans, cache,
3595                                             &cache->io_ctl, path,
3596                                             cache->key.objectid);
3597                         btrfs_put_block_group(cache);
3598                 }
3599
3601                 /*
3602                  * btrfs_wait_cache_io uses the cache->dirty_list to decide
3603                  * if it should update the cache_state.  Don't delete
3604                  * until after we wait.
3605                  *
3606                  * Since we're not running in the commit critical section
3607                  * we need the dirty_bgs_lock to protect from update_block_group
3608                  */
3609                 spin_lock(&cur_trans->dirty_bgs_lock);
3610                 list_del_init(&cache->dirty_list);
3611                 spin_unlock(&cur_trans->dirty_bgs_lock);
3612
3613                 should_put = 1;
3614
3615                 cache_save_setup(cache, trans, path);
3616
3617                 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3618                         cache->io_ctl.inode = NULL;
3619                         ret = btrfs_write_out_cache(root, trans, cache, path);
3620                         if (ret == 0 && cache->io_ctl.inode) {
3621                                 num_started++;
3622                                 should_put = 0;
3623
3624                                 /*
3625                                  * the cache_write_mutex is protecting
3626                                  * the io_list
3627                                  */
3628                                 list_add_tail(&cache->io_list, io);
3629                         } else {
3630                                 /*
3631                                  * if we failed to write the cache, the
3632                                  * generation will be bad and life goes on
3633                                  */
3634                                 ret = 0;
3635                         }
3636                 }
3637                 if (!ret) {
3638                         ret = write_one_cache_group(trans, root, path, cache);
3639                         /*
3640                          * Our block group might still be attached to the list
3641                          * of new block groups in the transaction handle of some
3642                          * other task (struct btrfs_trans_handle->new_bgs). This
3643                          * means its block group item isn't yet in the extent
3644                          * tree. If this happens ignore the error, as we will
3645                          * try again later in the critical section of the
3646                          * transaction commit.
3647                          */
3648                         if (ret == -ENOENT) {
3649                                 ret = 0;
3650                                 spin_lock(&cur_trans->dirty_bgs_lock);
3651                                 if (list_empty(&cache->dirty_list)) {
3652                                         list_add_tail(&cache->dirty_list,
3653                                                       &cur_trans->dirty_bgs);
3654                                         btrfs_get_block_group(cache);
3655                                 }
3656                                 spin_unlock(&cur_trans->dirty_bgs_lock);
3657                         } else if (ret) {
3658                                 btrfs_abort_transaction(trans, ret);
3659                         }
3660                 }
3661
3662                 /* if it's not on the io list, we need to put the block group */
3663                 if (should_put)
3664                         btrfs_put_block_group(cache);
3665
3666                 if (ret)
3667                         break;
3668
3669                 /*
3670                  * Avoid blocking other tasks for too long. It might even save
3671                  * us from writing caches for block groups that are going to be
3672                  * removed.
3673                  */
3674                 mutex_unlock(&trans->transaction->cache_write_mutex);
3675                 mutex_lock(&trans->transaction->cache_write_mutex);
3676         }
3677         mutex_unlock(&trans->transaction->cache_write_mutex);
3678
3679         /*
3680          * go through delayed refs for all the stuff we've just kicked off
3681          * and then loop back (just once)
3682          */
3683         ret = btrfs_run_delayed_refs(trans, root, 0);
3684         if (!ret && loops == 0) {
3685                 loops++;
3686                 spin_lock(&cur_trans->dirty_bgs_lock);
3687                 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3688                 /*
3689                  * dirty_bgs_lock protects us from concurrent block group
3690                  * deletes too (not just cache_write_mutex).
3691                  */
3692                 if (!list_empty(&dirty)) {
3693                         spin_unlock(&cur_trans->dirty_bgs_lock);
3694                         goto again;
3695                 }
3696                 spin_unlock(&cur_trans->dirty_bgs_lock);
3697         }
3698
3699         btrfs_free_path(path);
3700         return ret;
3701 }
3702
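/*
 * Write out the remaining dirty block groups during the critical section
 * of the transaction commit; see btrfs_start_dirty_block_groups() above
 * for the earlier, latency-reducing pass.
 */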
3703 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3704                                    struct btrfs_root *root)
3705 {
3706         struct btrfs_block_group_cache *cache;
3707         struct btrfs_transaction *cur_trans = trans->transaction;
3708         int ret = 0;
3709         int should_put;
3710         struct btrfs_path *path;
3711         struct list_head *io = &cur_trans->io_bgs;
3712         int num_started = 0;
3713
3714         path = btrfs_alloc_path();
3715         if (!path)
3716                 return -ENOMEM;
3717
3718         /*
3719          * Even though we are in the critical section of the transaction commit,
3720          * we can still have concurrent tasks adding elements to this
3721          * transaction's list of dirty block groups. These tasks correspond to
3722          * endio free space workers started when writeback finishes for a
3723          * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
3724          * allocate new block groups as a result of COWing nodes of the root
3725          * tree when updating the free space inode. The writeback for the space
3726          * caches is triggered by an earlier call to
3727          * btrfs_start_dirty_block_groups() and iterations of the following
3728          * loop.
3729          * Also we want to do the cache_save_setup first and then run the
3730          * delayed refs to make sure we have the best chance at doing this all
3731          * in one shot.
3732          */
3733         spin_lock(&cur_trans->dirty_bgs_lock);
3734         while (!list_empty(&cur_trans->dirty_bgs)) {
3735                 cache = list_first_entry(&cur_trans->dirty_bgs,
3736                                          struct btrfs_block_group_cache,
3737                                          dirty_list);
3738
3739                 /*
3740                  * this can happen if cache_save_setup re-dirties a block
3741                  * group that is already under IO.  Just wait for it to
3742                  * finish and then do it all again
3743                  */
3744                 if (!list_empty(&cache->io_list)) {
3745                         spin_unlock(&cur_trans->dirty_bgs_lock);
3746                         list_del_init(&cache->io_list);
3747                         btrfs_wait_cache_io(root, trans, cache,
3748                                             &cache->io_ctl, path,
3749                                             cache->key.objectid);
3750                         btrfs_put_block_group(cache);
3751                         spin_lock(&cur_trans->dirty_bgs_lock);
3752                 }
3753
3754                 /*
3755                  * don't remove from the dirty list until after we've waited
3756                  * on any pending IO
3757                  */
3758                 list_del_init(&cache->dirty_list);
3759                 spin_unlock(&cur_trans->dirty_bgs_lock);
3760                 should_put = 1;
3761
3762                 cache_save_setup(cache, trans, path);
3763
3764                 if (!ret)
3765                         ret = btrfs_run_delayed_refs(trans, root, (unsigned long) -1);
3766
3767                 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3768                         cache->io_ctl.inode = NULL;
3769                         ret = btrfs_write_out_cache(root, trans, cache, path);
3770                         if (ret == 0 && cache->io_ctl.inode) {
3771                                 num_started++;
3772                                 should_put = 0;
3773                                 list_add_tail(&cache->io_list, io);
3774                         } else {
3775                                 /*
3776                                  * if we failed to write the cache, the
3777                                  * generation will be bad and life goes on
3778                                  */
3779                                 ret = 0;
3780                         }
3781                 }
3782                 if (!ret) {
3783                         ret = write_one_cache_group(trans, root, path, cache);
3784                         /*
3785                          * One of the free space endio workers might have
3786                          * created a new block group while updating a free space
3787                          * cache's inode (at inode.c:btrfs_finish_ordered_io())
3788                          * and hasn't released its transaction handle yet, in
3789                          * which case the new block group is still attached to
3790                          * its transaction handle and its creation has not
3791                          * finished yet (no block group item in the extent tree
3792                          * yet, etc). If this is the case, wait for all free
3793                          * space endio workers to finish and retry. This is a
3794                          * a very rare case so no need for a more efficient and
3795                  * very rare case so no need for a more efficient and
3796                          */
3797                         if (ret == -ENOENT) {
3798                                 wait_event(cur_trans->writer_wait,
3799                                    atomic_read(&cur_trans->num_writers) == 1);
3800                                 ret = write_one_cache_group(trans, root, path,
3801                                                             cache);
3802                         }
3803                         if (ret)
3804                                 btrfs_abort_transaction(trans, ret);
3805                 }
3806
3807                 /* if it's not on the io list, we need to put the block group */
3808                 if (should_put)
3809                         btrfs_put_block_group(cache);
3810                 spin_lock(&cur_trans->dirty_bgs_lock);
3811         }
3812         spin_unlock(&cur_trans->dirty_bgs_lock);
3813
3814         while (!list_empty(io)) {
3815                 cache = list_first_entry(io, struct btrfs_block_group_cache,
3816                                          io_list);
3817                 list_del_init(&cache->io_list);
3818                 btrfs_wait_cache_io(root, trans, cache,
3819                                     &cache->io_ctl, path, cache->key.objectid);
3820                 btrfs_put_block_group(cache);
3821         }
3822
3823         btrfs_free_path(path);
3824         return ret;
3825 }
3826
3827 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3828 {
3829         struct btrfs_block_group_cache *block_group;
3830         int readonly = 0;
3831
3832         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3833         if (!block_group || block_group->ro)
3834                 readonly = 1;
3835         if (block_group)
3836                 btrfs_put_block_group(block_group);
3837         return readonly;
3838 }
3839
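/*
 * Take a nocow writer reference on the block group containing 'bytenr'.
 * Returns false when the group is missing or read-only.  On success the
 * block group lookup reference is held until btrfs_dec_nocow_writers().
 */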
3840 bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
3841 {
3842         struct btrfs_block_group_cache *bg;
3843         bool ret = true;
3844
3845         bg = btrfs_lookup_block_group(fs_info, bytenr);
3846         if (!bg)
3847                 return false;
3848
3849         spin_lock(&bg->lock);
3850         if (bg->ro)
3851                 ret = false;
3852         else
3853                 atomic_inc(&bg->nocow_writers);
3854         spin_unlock(&bg->lock);
3855
3856         /* no put on block group, done by btrfs_dec_nocow_writers */
3857         if (!ret)
3858                 btrfs_put_block_group(bg);
3859
3860         return ret;
3862 }
3863
3864 void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
3865 {
3866         struct btrfs_block_group_cache *bg;
3867
3868         bg = btrfs_lookup_block_group(fs_info, bytenr);
3869         ASSERT(bg);
3870         if (atomic_dec_and_test(&bg->nocow_writers))
3871                 wake_up_atomic_t(&bg->nocow_writers);
3872         /*
3873          * Once for our lookup and once for the lookup done by a previous call
3874          * to btrfs_inc_nocow_writers()
3875          */
3876         btrfs_put_block_group(bg);
3877         btrfs_put_block_group(bg);
3878 }
3879
3880 static int btrfs_wait_nocow_writers_atomic_t(atomic_t *a)
3881 {
3882         schedule();
3883         return 0;
3884 }
3885
3886 void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
3887 {
3888         wait_on_atomic_t(&bg->nocow_writers,
3889                          btrfs_wait_nocow_writers_atomic_t,
3890                          TASK_UNINTERRUPTIBLE);
3891 }
3892
3893 static const char *alloc_name(u64 flags)
3894 {
3895         switch (flags) {
3896         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3897                 return "mixed";
3898         case BTRFS_BLOCK_GROUP_METADATA:
3899                 return "metadata";
3900         case BTRFS_BLOCK_GROUP_DATA:
3901                 return "data";
3902         case BTRFS_BLOCK_GROUP_SYSTEM:
3903                 return "system";
3904         default:
3905                 WARN_ON(1);
3906                 return "invalid-combination";
3907         }
3908 }
3909
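/*
 * Find the space_info matching 'flags', creating it on first use, and
 * fold the given byte counts into its totals.  'factor' doubles the
 * on-disk accounting for profiles that store two copies of everything
 * (DUP, RAID1, RAID10).
 */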
3910 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3911                              u64 total_bytes, u64 bytes_used,
3912                              u64 bytes_readonly,
3913                              struct btrfs_space_info **space_info)
3914 {
3915         struct btrfs_space_info *found;
3916         int i;
3917         int factor;
3918         int ret;
3919
3920         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3921                      BTRFS_BLOCK_GROUP_RAID10))
3922                 factor = 2;
3923         else
3924                 factor = 1;
3925
3926         found = __find_space_info(info, flags);
3927         if (found) {
3928                 spin_lock(&found->lock);
3929                 found->total_bytes += total_bytes;
3930                 found->disk_total += total_bytes * factor;
3931                 found->bytes_used += bytes_used;
3932                 found->disk_used += bytes_used * factor;
3933                 found->bytes_readonly += bytes_readonly;
3934                 if (total_bytes > 0)
3935                         found->full = 0;
3936                 space_info_add_new_bytes(info, found, total_bytes -
3937                                          bytes_used - bytes_readonly);
3938                 spin_unlock(&found->lock);
3939                 *space_info = found;
3940                 return 0;
3941         }
3942         found = kzalloc(sizeof(*found), GFP_NOFS);
3943         if (!found)
3944                 return -ENOMEM;
3945
3946         ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
3947         if (ret) {
3948                 kfree(found);
3949                 return ret;
3950         }
3951
3952         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3953                 INIT_LIST_HEAD(&found->block_groups[i]);
3954         init_rwsem(&found->groups_sem);
3955         spin_lock_init(&found->lock);
3956         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3957         found->total_bytes = total_bytes;
3958         found->disk_total = total_bytes * factor;
3959         found->bytes_used = bytes_used;
3960         found->disk_used = bytes_used * factor;
3961         found->bytes_pinned = 0;
3962         found->bytes_reserved = 0;
3963         found->bytes_readonly = bytes_readonly;
3964         found->bytes_may_use = 0;
3965         found->full = 0;
3966         found->max_extent_size = 0;
3967         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3968         found->chunk_alloc = 0;
3969         found->flush = 0;
3970         init_waitqueue_head(&found->wait);
3971         INIT_LIST_HEAD(&found->ro_bgs);
3972         INIT_LIST_HEAD(&found->tickets);
3973         INIT_LIST_HEAD(&found->priority_tickets);
3974
3975         ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3976                                     info->space_info_kobj, "%s",
3977                                     alloc_name(found->flags));
3978         if (ret) {
3979                 kfree(found);
3980                 return ret;
3981         }
3982
3983         *space_info = found;
3984         list_add_rcu(&found->list, &info->space_info);
3985         if (flags & BTRFS_BLOCK_GROUP_DATA)
3986                 info->data_sinfo = found;
3987
3988         return ret;
3989 }
3990
3991 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3992 {
3993         u64 extra_flags = chunk_to_extended(flags) &
3994                                 BTRFS_EXTENDED_PROFILE_MASK;
3995
3996         write_seqlock(&fs_info->profiles_lock);
3997         if (flags & BTRFS_BLOCK_GROUP_DATA)
3998                 fs_info->avail_data_alloc_bits |= extra_flags;
3999         if (flags & BTRFS_BLOCK_GROUP_METADATA)
4000                 fs_info->avail_metadata_alloc_bits |= extra_flags;
4001         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
4002                 fs_info->avail_system_alloc_bits |= extra_flags;
4003         write_sequnlock(&fs_info->profiles_lock);
4004 }
4005
4006 /*
4007  * returns target flags in extended format or 0 if restripe for this
4008  * chunk_type is not in progress
4009  *
4010  * should be called with either volume_mutex or balance_lock held
4011  */
4012 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
4013 {
4014         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4015         u64 target = 0;
4016
4017         if (!bctl)
4018                 return 0;
4019
4020         if (flags & BTRFS_BLOCK_GROUP_DATA &&
4021             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
4022                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
4023         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
4024                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
4025                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
4026         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
4027                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
4028                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
4029         }
4030
4031         return target;
4032 }
4033
4034 /*
4035  * @flags: available profiles in extended format (see ctree.h)
4036  *
4037  * Returns reduced profile in chunk format.  If profile changing is in
4038  * progress (either running or paused) picks the target profile (if it's
4039  * already available), otherwise falls back to plain reducing.
4040  */
4041 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
4042 {
4043         u64 num_devices = root->fs_info->fs_devices->rw_devices;
4044         u64 target;
4045         u64 raid_type;
4046         u64 allowed = 0;
4047
4048         /*
4049          * See if a restripe for this chunk_type is in progress; if so,
4050          * try to reduce to the target profile.
4051          */
4052         spin_lock(&root->fs_info->balance_lock);
4053         target = get_restripe_target(root->fs_info, flags);
4054         if (target) {
4055                 /* pick target profile only if it's already available */
4056                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
4057                         spin_unlock(&root->fs_info->balance_lock);
4058                         return extended_to_chunk(target);
4059                 }
4060         }
4061         spin_unlock(&root->fs_info->balance_lock);
4062
4063         /* First, mask out the RAID levels which aren't possible */
4064         for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
4065                 if (num_devices >= btrfs_raid_array[raid_type].devs_min)
4066                         allowed |= btrfs_raid_group[raid_type];
4067         }
4068         allowed &= flags;
4069
4070         if (allowed & BTRFS_BLOCK_GROUP_RAID6)
4071                 allowed = BTRFS_BLOCK_GROUP_RAID6;
4072         else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
4073                 allowed = BTRFS_BLOCK_GROUP_RAID5;
4074         else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
4075                 allowed = BTRFS_BLOCK_GROUP_RAID10;
4076         else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
4077                 allowed = BTRFS_BLOCK_GROUP_RAID1;
4078         else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
4079                 allowed = BTRFS_BLOCK_GROUP_RAID0;
4080
4081         flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
4082
4083         return extended_to_chunk(flags | allowed);
4084 }
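/*
 * A sketch of the reduction above (illustrative): suppose a paused
 * balance left @flags == DATA | RAID1 | RAID0 and we have two rw
 * devices.  Both profiles pass the devs_min check, so
 *
 *	allowed = RAID1 | RAID0;
 *	allowed = RAID1;	// RAID1 outranks RAID0 in the ladder
 *
 * and we return extended_to_chunk(DATA | RAID1): a single profile is
 * always picked, never a mix.
 */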
4085
4086 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
4087 {
4088         unsigned seq;
4089         u64 flags;
4090
4091         do {
4092                 flags = orig_flags;
4093                 seq = read_seqbegin(&root->fs_info->profiles_lock);
4094
4095                 if (flags & BTRFS_BLOCK_GROUP_DATA)
4096                         flags |= root->fs_info->avail_data_alloc_bits;
4097                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
4098                         flags |= root->fs_info->avail_system_alloc_bits;
4099                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
4100                         flags |= root->fs_info->avail_metadata_alloc_bits;
4101         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
4102
4103         return btrfs_reduce_alloc_profile(root, flags);
4104 }
4105
4106 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
4107 {
4108         u64 flags;
4109         u64 ret;
4110
4111         if (data)
4112                 flags = BTRFS_BLOCK_GROUP_DATA;
4113         else if (root == root->fs_info->chunk_root)
4114                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
4115         else
4116                 flags = BTRFS_BLOCK_GROUP_METADATA;
4117
4118         ret = get_alloc_profile(root, flags);
4119         return ret;
4120 }
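/*
 * For orientation, the two flavors as used by callers in this file
 * (existing call sites, shown here only as examples):
 *
 *	alloc_target = btrfs_get_alloc_profile(root, 1);	  // DATA
 *	flags = btrfs_get_alloc_profile(fs_info->chunk_root, 0); // SYSTEM
 */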
4121
4122 int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes)
4123 {
4124         struct btrfs_space_info *data_sinfo;
4125         struct btrfs_root *root = BTRFS_I(inode)->root;
4126         struct btrfs_fs_info *fs_info = root->fs_info;
4127         u64 used;
4128         int ret = 0;
4129         int need_commit = 2;
4130         int have_pinned_space;
4131
4132         /* make sure bytes are sectorsize aligned */
4133         bytes = ALIGN(bytes, root->sectorsize);
4134
4135         if (btrfs_is_free_space_inode(inode)) {
4136                 need_commit = 0;
4137                 ASSERT(current->journal_info);
4138         }
4139
4140         data_sinfo = fs_info->data_sinfo;
4141         if (!data_sinfo)
4142                 goto alloc;
4143
4144 again:
4145         /* make sure we have enough space to handle the data first */
4146         spin_lock(&data_sinfo->lock);
4147         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
4148                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
4149                 data_sinfo->bytes_may_use;
4150
4151         if (used + bytes > data_sinfo->total_bytes) {
4152                 struct btrfs_trans_handle *trans;
4153
4154                 /*
4155                  * if we don't have enough free bytes in this space then we need
4156                  * to alloc a new chunk.
4157                  */
4158                 if (!data_sinfo->full) {
4159                         u64 alloc_target;
4160
4161                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
4162                         spin_unlock(&data_sinfo->lock);
4163 alloc:
4164                         alloc_target = btrfs_get_alloc_profile(root, 1);
4165                         /*
4166                          * It is ugly that we don't call a nolock join
4167                          * transaction for the free space inode case here,
4168                          * but it is safe: we only do the data space
4169                          * reservation for the free space cache in the
4170                          * transaction context, and the common join
4171                          * transaction just increases the use count of the
4172                          * current transaction handle without taking the
4173                          * fs trans_lock.
4174                          */
4175                         trans = btrfs_join_transaction(root);
4176                         if (IS_ERR(trans))
4177                                 return PTR_ERR(trans);
4178
4179                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4180                                              alloc_target,
4181                                              CHUNK_ALLOC_NO_FORCE);
4182                         btrfs_end_transaction(trans, root);
4183                         if (ret < 0) {
4184                                 if (ret != -ENOSPC)
4185                                         return ret;
4186                                 else {
4187                                         have_pinned_space = 1;
4188                                         goto commit_trans;
4189                                 }
4190                         }
4191
4192                         if (!data_sinfo)
4193                                 data_sinfo = fs_info->data_sinfo;
4194
4195                         goto again;
4196                 }
4197
4198                 /*
4199                  * If we don't have enough pinned space to satisfy this
4200                  * allocation and no chunk was removed in the current
4201                  * transaction, don't bother committing the transaction.
4202                  */
4203                 have_pinned_space = percpu_counter_compare(
4204                         &data_sinfo->total_bytes_pinned,
4205                         used + bytes - data_sinfo->total_bytes);
4206                 spin_unlock(&data_sinfo->lock);
4207
4208                 /* commit the current transaction and try again */
4209 commit_trans:
4210                 if (need_commit &&
4211                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
4212                         need_commit--;
4213
4214                         if (need_commit > 0) {
4215                                 btrfs_start_delalloc_roots(fs_info, 0, -1);
4216                                 btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
4217                         }
4218
4219                         trans = btrfs_join_transaction(root);
4220                         if (IS_ERR(trans))
4221                                 return PTR_ERR(trans);
4222                         if (have_pinned_space >= 0 ||
4223                             test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
4224                                      &trans->transaction->flags) ||
4225                             need_commit > 0) {
4226                                 ret = btrfs_commit_transaction(trans, root);
4227                                 if (ret)
4228                                         return ret;
4229                                 /*
4230                                  * The cleaner kthread might still be doing iput
4231                                  * operations. Wait for it to finish so that
4232                                  * more space is released.
4233                                  */
4234                                 mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
4235                                 mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
4236                                 goto again;
4237                         } else {
4238                                 btrfs_end_transaction(trans, root);
4239                         }
4240                 }
4241
4242                 trace_btrfs_space_reservation(root->fs_info,
4243                                               "space_info:enospc",
4244                                               data_sinfo->flags, bytes, 1);
4245                 return -ENOSPC;
4246         }
4247         data_sinfo->bytes_may_use += bytes;
4248         trace_btrfs_space_reservation(root->fs_info, "space_info",
4249                                       data_sinfo->flags, bytes, 1);
4250         spin_unlock(&data_sinfo->lock);
4251
4252         return ret;
4253 }
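/*
 * Retry logic of the function above in short (a sketch, not new
 * behavior): need_commit starts at 2, so on ENOSPC we get at most two
 * commit attempts.  The first kicks delalloc and waits for ordered
 * extents before committing; the second commits only if pinned bytes or
 * freed block groups (BTRFS_TRANS_HAVE_FREE_BGS) suggest a commit will
 * actually release space.  After that we give up with -ENOSPC.
 */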
4254
4255 /*
4256  * New check_data_free_space() with the ability to do precise data
4257  * reservation.  It will replace the old btrfs_check_data_free_space(), but
4258  * to keep the patches split, add the new function first and replace later.
4259  */
4260 int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
4261 {
4262         struct btrfs_root *root = BTRFS_I(inode)->root;
4263         int ret;
4264
4265         /* align the range */
4266         len = round_up(start + len, root->sectorsize) -
4267               round_down(start, root->sectorsize);
4268         start = round_down(start, root->sectorsize);
4269
4270         ret = btrfs_alloc_data_chunk_ondemand(inode, len);
4271         if (ret < 0)
4272                 return ret;
4273
4274         /*
4275          * Use the new btrfs_qgroup_reserve_data() to reserve precise data space
4276          *
4277          * TODO: Find a good way to avoid reserving data space for NOCOW
4278          * ranges without hurting performance when quotas are disabled.
4279          */
4280         ret = btrfs_qgroup_reserve_data(inode, start, len);
4281         return ret;
4282 }
4283
4284 /*
4285  * Called if we need to clear a data reservation for this inode
4286  * Normally in an error case.
4287  *
4288  * This one will *NOT* use the accurate qgroup reserved space API; it is for
4289  * callers that cannot sleep and are sure qgroup reserved space will not be
4290  * affected, like clear_bit_hook().
4291  */
4292 void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
4293                                             u64 len)
4294 {
4295         struct btrfs_root *root = BTRFS_I(inode)->root;
4296         struct btrfs_space_info *data_sinfo;
4297
4298         /* Make sure the range is aligned to sectorsize */
4299         len = round_up(start + len, root->sectorsize) -
4300               round_down(start, root->sectorsize);
4301         start = round_down(start, root->sectorsize);
4302
4303         data_sinfo = root->fs_info->data_sinfo;
4304         spin_lock(&data_sinfo->lock);
4305         if (WARN_ON(data_sinfo->bytes_may_use < len))
4306                 data_sinfo->bytes_may_use = 0;
4307         else
4308                 data_sinfo->bytes_may_use -= len;
4309         trace_btrfs_space_reservation(root->fs_info, "space_info",
4310                                       data_sinfo->flags, len, 0);
4311         spin_unlock(&data_sinfo->lock);
4312 }
4313
4314 /*
4315  * Called if we need to clear a data reservation for this inode
4316  * Normally in an error case.
4317  *
4318  * This one handles the per-inode data rsv map for the accurate reserved
4319  * space framework.
4320  */
4321 void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
4322 {
4323         btrfs_free_reserved_data_space_noquota(inode, start, len);
4324         btrfs_qgroup_free_data(inode, start, len);
4325 }
4326
4327 static void force_metadata_allocation(struct btrfs_fs_info *info)
4328 {
4329         struct list_head *head = &info->space_info;
4330         struct btrfs_space_info *found;
4331
4332         rcu_read_lock();
4333         list_for_each_entry_rcu(found, head, list) {
4334                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
4335                         found->force_alloc = CHUNK_ALLOC_FORCE;
4336         }
4337         rcu_read_unlock();
4338 }
4339
4340 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
4341 {
4342         return (global->size << 1);
4343 }
4344
4345 static int should_alloc_chunk(struct btrfs_root *root,
4346                               struct btrfs_space_info *sinfo, int force)
4347 {
4348         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4349         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
4350         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
4351         u64 thresh;
4352
4353         if (force == CHUNK_ALLOC_FORCE)
4354                 return 1;
4355
4356         /*
4357          * We need to take into account the global rsv because for all intents
4358          * and purposes it's used space.  Don't worry about locking the
4359          * global_rsv, it doesn't change except when the transaction commits.
4360          */
4361         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
4362                 num_allocated += calc_global_rsv_need_space(global_rsv);
4363
4364         /*
4365          * in limited mode, we want to have some free space up to
4366          * about 1% of the FS size.
4367          */
4368         if (force == CHUNK_ALLOC_LIMITED) {
4369                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
4370                 thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
4371
4372                 if (num_bytes - num_allocated < thresh)
4373                         return 1;
4374         }
4375
4376         if (num_allocated + SZ_2M < div_factor(num_bytes, 8))
4377                 return 0;
4378         return 1;
4379 }
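/*
 * Worked numbers for the thresholds above (illustrative): on a 1TiB
 * filesystem, CHUNK_ALLOC_LIMITED allocates once free space in this
 * space_info drops below max(64MiB, 1% of 1TiB) == ~10GiB, while in the
 * unforced case we only allocate once roughly 80% of the space
 * (div_factor(num_bytes, 8)) has been allocated.
 */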
4380
4381 static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
4382 {
4383         u64 num_dev;
4384
4385         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
4386                     BTRFS_BLOCK_GROUP_RAID0 |
4387                     BTRFS_BLOCK_GROUP_RAID5 |
4388                     BTRFS_BLOCK_GROUP_RAID6))
4389                 num_dev = root->fs_info->fs_devices->rw_devices;
4390         else if (type & BTRFS_BLOCK_GROUP_RAID1)
4391                 num_dev = 2;
4392         else
4393                 num_dev = 1;    /* DUP or single */
4394
4395         return num_dev;
4396 }
4397
4398 /*
4399  * Reserve space in the SYSTEM space info necessary for allocating or
4400  * removing a chunk: num_devs device item updates plus one chunk item to
4401  * add or remove.
4402  */
4403 void check_system_chunk(struct btrfs_trans_handle *trans,
4404                         struct btrfs_root *root,
4405                         u64 type)
4406 {
4407         struct btrfs_space_info *info;
4408         u64 left;
4409         u64 thresh;
4410         int ret = 0;
4411         u64 num_devs;
4412
4413         /*
4414          * Needed because we can end up allocating a system chunk, and the space
4415          * reservation in the chunk block reserve must be atomic and race free.
4416          */
4417         ASSERT(mutex_is_locked(&root->fs_info->chunk_mutex));
4418
4419         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4420         spin_lock(&info->lock);
4421         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
4422                 info->bytes_reserved - info->bytes_readonly -
4423                 info->bytes_may_use;
4424         spin_unlock(&info->lock);
4425
4426         num_devs = get_profile_num_devs(root, type);
4427
4428         /* num_devs device items to update and 1 chunk item to add or remove */
4429         thresh = btrfs_calc_trunc_metadata_size(root, num_devs) +
4430                 btrfs_calc_trans_metadata_size(root, 1);
4431
4432         if (left < thresh && btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
4433                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
4434                         left, thresh, type);
4435                 dump_space_info(info, 0, 0);
4436         }
4437
4438         if (left < thresh) {
4439                 u64 flags;
4440
4441                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
4442                 /*
4443                  * Ignore failure to create system chunk. We might end up not
4444                  * needing it, as we might not need to COW all nodes/leafs from
4445                  * the paths we visit in the chunk tree (they were already COWed
4446                  * or created in the current transaction for example).
4447                  */
4448                 ret = btrfs_alloc_chunk(trans, root, flags);
4449         }
4450
4451         if (!ret) {
4452                 ret = btrfs_block_rsv_add(root->fs_info->chunk_root,
4453                                           &root->fs_info->chunk_block_rsv,
4454                                           thresh, BTRFS_RESERVE_NO_FLUSH);
4455                 if (!ret)
4456                         trans->chunk_bytes_reserved += thresh;
4457         }
4458 }
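/*
 * Example of the reservation above (illustrative): on a 3-device RAID0
 * filesystem, get_profile_num_devs() returns 3, so thresh covers the
 * metadata cost of updating 3 device items plus inserting or deleting
 * one chunk item:
 *
 *	thresh = btrfs_calc_trunc_metadata_size(root, 3) +
 *		 btrfs_calc_trans_metadata_size(root, 1);
 */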
4459
4460 /*
4461  * If force is CHUNK_ALLOC_FORCE:
4462  *    - return 1 if it successfully allocates a chunk,
4463  *    - return errors including -ENOSPC otherwise.
4464  * If force is NOT CHUNK_ALLOC_FORCE:
4465  *    - return 0 if it doesn't need to allocate a new chunk,
4466  *    - return 1 if it successfully allocates a chunk,
4467  *    - return errors including -ENOSPC otherwise.
4468  */
4469 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
4470                           struct btrfs_root *extent_root, u64 flags, int force)
4471 {
4472         struct btrfs_space_info *space_info;
4473         struct btrfs_fs_info *fs_info = extent_root->fs_info;
4474         int wait_for_alloc = 0;
4475         int ret = 0;
4476
4477         /* Don't re-enter if we're already allocating a chunk */
4478         if (trans->allocating_chunk)
4479                 return -ENOSPC;
4480
4481         space_info = __find_space_info(extent_root->fs_info, flags);
4482         if (!space_info) {
4483                 ret = update_space_info(extent_root->fs_info, flags,
4484                                         0, 0, 0, &space_info);
4485                 BUG_ON(ret); /* -ENOMEM */
4486         }
4487         BUG_ON(!space_info); /* Logic error */
4488
4489 again:
4490         spin_lock(&space_info->lock);
4491         if (force < space_info->force_alloc)
4492                 force = space_info->force_alloc;
4493         if (space_info->full) {
4494                 if (should_alloc_chunk(extent_root, space_info, force))
4495                         ret = -ENOSPC;
4496                 else
4497                         ret = 0;
4498                 spin_unlock(&space_info->lock);
4499                 return ret;
4500         }
4501
4502         if (!should_alloc_chunk(extent_root, space_info, force)) {
4503                 spin_unlock(&space_info->lock);
4504                 return 0;
4505         } else if (space_info->chunk_alloc) {
4506                 wait_for_alloc = 1;
4507         } else {
4508                 space_info->chunk_alloc = 1;
4509         }
4510
4511         spin_unlock(&space_info->lock);
4512
4513         mutex_lock(&fs_info->chunk_mutex);
4514
4515         /*
4516          * The chunk_mutex is held throughout the entirety of a chunk
4517          * allocation, so once we've acquired the chunk_mutex we know that the
4518          * other guy is done and we need to recheck and see if we should
4519          * allocate.
4520          */
4521         if (wait_for_alloc) {
4522                 mutex_unlock(&fs_info->chunk_mutex);
4523                 wait_for_alloc = 0;
4524                 goto again;
4525         }
4526
4527         trans->allocating_chunk = true;
4528
4529         /*
4530          * If we have mixed data/metadata chunks we want to make sure we keep
4531          * allocating mixed chunks instead of individual chunks.
4532          */
4533         if (btrfs_mixed_space_info(space_info))
4534                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
4535
4536         /*
4537          * if we're doing a data chunk, go ahead and make sure that
4538          * we keep a reasonable number of metadata chunks allocated in the
4539          * FS as well.
4540          */
4541         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
4542                 fs_info->data_chunk_allocations++;
4543                 if (!(fs_info->data_chunk_allocations %
4544                       fs_info->metadata_ratio))
4545                         force_metadata_allocation(fs_info);
4546         }
4547
4548         /*
4549          * Check if we have enough space in SYSTEM chunk because we may need
4550          * to update devices.
4551          */
4552         check_system_chunk(trans, extent_root, flags);
4553
4554         ret = btrfs_alloc_chunk(trans, extent_root, flags);
4555         trans->allocating_chunk = false;
4556
4557         spin_lock(&space_info->lock);
4558         if (ret < 0 && ret != -ENOSPC)
4559                 goto out;
4560         if (ret)
4561                 space_info->full = 1;
4562         else
4563                 ret = 1;
4564
4565         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4566 out:
4567         space_info->chunk_alloc = 0;
4568         spin_unlock(&space_info->lock);
4569         mutex_unlock(&fs_info->chunk_mutex);
4570         /*
4571          * When we allocate a new chunk we reserve space in the chunk block
4572          * reserve to make sure we can COW nodes/leafs in the chunk tree or
4573          * add new nodes/leafs to it if we end up needing to do it when
4574          * inserting the chunk item and updating device items as part of the
4575          * second phase of chunk allocation, performed by
4576          * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
4577          * large number of new block groups to create in our transaction
4578          * handle's new_bgs list to avoid exhausting the chunk block reserve
4579          * in extreme cases - like having a single transaction create many new
4580          * block groups when starting to write out the free space caches of all
4581          * the block groups that were made dirty during the lifetime of the
4582          * transaction.
4583          */
4584         if (trans->can_flush_pending_bgs &&
4585             trans->chunk_bytes_reserved >= (u64)SZ_2M) {
4586                 btrfs_create_pending_block_groups(trans, extent_root);
4587                 btrfs_trans_release_chunk_metadata(trans);
4588         }
4589         return ret;
4590 }
4591
4592 static int can_overcommit(struct btrfs_root *root,
4593                           struct btrfs_space_info *space_info, u64 bytes,
4594                           enum btrfs_reserve_flush_enum flush)
4595 {
4596         struct btrfs_block_rsv *global_rsv;
4597         u64 profile;
4598         u64 space_size;
4599         u64 avail;
4600         u64 used;
4601
4602         /* Don't overcommit when in mixed mode. */
4603         if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
4604                 return 0;
4605
4606         BUG_ON(root->fs_info == NULL);
4607         global_rsv = &root->fs_info->global_block_rsv;
4608         profile = btrfs_get_alloc_profile(root, 0);
4609         used = space_info->bytes_used + space_info->bytes_reserved +
4610                 space_info->bytes_pinned + space_info->bytes_readonly;
4611
4612         /*
4613          * We only want to allow over committing if we have lots of actual space
4614          * free, but if we don't have enough space to handle the global reserve
4615          * space then we could end up having a real enospc problem when trying
4616          * to allocate a chunk or some other such important allocation.
4617          */
4618         spin_lock(&global_rsv->lock);
4619         space_size = calc_global_rsv_need_space(global_rsv);
4620         spin_unlock(&global_rsv->lock);
4621         if (used + space_size >= space_info->total_bytes)
4622                 return 0;
4623
4624         used += space_info->bytes_may_use;
4625
4626         spin_lock(&root->fs_info->free_chunk_lock);
4627         avail = root->fs_info->free_chunk_space;
4628         spin_unlock(&root->fs_info->free_chunk_lock);
4629
4630         /*
4631          * If we have dup, raid1 or raid10 then only half of the free
4632          * space is actually usable.  For raid56, the space info used
4633          * doesn't include the parity drive, so we don't have to
4634          * change the math.
4635          */
4636         if (profile & (BTRFS_BLOCK_GROUP_DUP |
4637                        BTRFS_BLOCK_GROUP_RAID1 |
4638                        BTRFS_BLOCK_GROUP_RAID10))
4639                 avail >>= 1;
4640
4641         /*
4642          * If we aren't flushing all things, let us overcommit up to
4643          * half of the space.  If we can flush, don't let us overcommit
4644          * too much; allow overcommitting up to 1/8 of the space.
4645          */
4646         if (flush == BTRFS_RESERVE_FLUSH_ALL)
4647                 avail >>= 3;
4648         else
4649                 avail >>= 1;
4650
4651         if (used + bytes < space_info->total_bytes + avail)
4652                 return 1;
4653         return 0;
4654 }
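/*
 * Worked example (illustrative numbers): a metadata space_info with
 * total_bytes == 10GiB, used == 9GiB, 8GiB of unallocated device space,
 * RAID1 profile and BTRFS_RESERVE_FLUSH_ALL gives:
 *
 *	avail = 8GiB >> 1;	// RAID1 halves usable raw space -> 4GiB
 *	avail >>= 3;		// a flushing caller may use 1/8 -> 512MiB
 *
 * so the reservation overcommits iff 9GiB + bytes < 10GiB + 512MiB.
 */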
4655
4656 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
4657                                          unsigned long nr_pages, int nr_items)
4658 {
4659         struct super_block *sb = root->fs_info->sb;
4660
4661         if (down_read_trylock(&sb->s_umount)) {
4662                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4663                 up_read(&sb->s_umount);
4664         } else {
4665                 /*
4666                  * We needn't worry about the filesystem going from r/w to r/o
4667                  * even though we don't acquire the ->s_umount mutex, because
4668                  * the filesystem guarantees the delalloc inode list is empty
4669                  * once the filesystem is read-only (all dirty pages have been
4670                  * written to disk).
4671                  */
4672                 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
4673                 if (!current->journal_info)
4674                         btrfs_wait_ordered_roots(root->fs_info, nr_items,
4675                                                  0, (u64)-1);
4676         }
4677 }
4678
4679 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
4680 {
4681         u64 bytes;
4682         int nr;
4683
4684         bytes = btrfs_calc_trans_metadata_size(root, 1);
4685         nr = (int)div64_u64(to_reclaim, bytes);
4686         if (!nr)
4687                 nr = 1;
4688         return nr;
4689 }
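/*
 * E.g. with a 16KiB nodesize, btrfs_calc_trans_metadata_size(root, 1)
 * is 16KiB * BTRFS_MAX_LEVEL * 2 == 256KiB, so reclaiming 4MiB maps to
 * 16 items (numbers are illustrative and depend on the nodesize); the
 * clamp below guarantees we always flush at least one item.
 */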
4690
4691 #define EXTENT_SIZE_PER_ITEM    SZ_256K
4692
4693 /*
4694  * shrink metadata reservation for delalloc
4695  */
4696 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4697                             bool wait_ordered)
4698 {
4699         struct btrfs_block_rsv *block_rsv;
4700         struct btrfs_space_info *space_info;
4701         struct btrfs_trans_handle *trans;
4702         u64 delalloc_bytes;
4703         u64 max_reclaim;
4704         long time_left;
4705         unsigned long nr_pages;
4706         int loops;
4707         int items;
4708         enum btrfs_reserve_flush_enum flush;
4709
4710         /* Calc the number of items we need to flush for this space reservation */
4711         items = calc_reclaim_items_nr(root, to_reclaim);
4712         to_reclaim = (u64)items * EXTENT_SIZE_PER_ITEM;
4713
4714         trans = (struct btrfs_trans_handle *)current->journal_info;
4715         block_rsv = &root->fs_info->delalloc_block_rsv;
4716         space_info = block_rsv->space_info;
4717
4718         delalloc_bytes = percpu_counter_sum_positive(
4719                                                 &root->fs_info->delalloc_bytes);
4720         if (delalloc_bytes == 0) {
4721                 if (trans)
4722                         return;
4723                 if (wait_ordered)
4724                         btrfs_wait_ordered_roots(root->fs_info, items,
4725                                                  0, (u64)-1);
4726                 return;
4727         }
4728
4729         loops = 0;
4730         while (delalloc_bytes && loops < 3) {
4731                 max_reclaim = min(delalloc_bytes, to_reclaim);
4732                 nr_pages = max_reclaim >> PAGE_SHIFT;
4733                 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4734                 /*
4735                  * We need to wait for the async pages to actually start before
4736                  * we do anything.
4737                  */
4738                 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4739                 if (!max_reclaim)
4740                         goto skip_async;
4741
4742                 if (max_reclaim <= nr_pages)
4743                         max_reclaim = 0;
4744                 else
4745                         max_reclaim -= nr_pages;
4746
4747                 wait_event(root->fs_info->async_submit_wait,
4748                            atomic_read(&root->fs_info->async_delalloc_pages) <=
4749                            (int)max_reclaim);
4750 skip_async:
4751                 if (!trans)
4752                         flush = BTRFS_RESERVE_FLUSH_ALL;
4753                 else
4754                         flush = BTRFS_RESERVE_NO_FLUSH;
4755                 spin_lock(&space_info->lock);
4756                 if (can_overcommit(root, space_info, orig, flush)) {
4757                         spin_unlock(&space_info->lock);
4758                         break;
4759                 }
4760                 if (list_empty(&space_info->tickets) &&
4761                     list_empty(&space_info->priority_tickets)) {
4762                         spin_unlock(&space_info->lock);
4763                         break;
4764                 }
4765                 spin_unlock(&space_info->lock);
4766
4767                 loops++;
4768                 if (wait_ordered && !trans) {
4769                         btrfs_wait_ordered_roots(root->fs_info, items,
4770                                                  0, (u64)-1);
4771                 } else {
4772                         time_left = schedule_timeout_killable(1);
4773                         if (time_left)
4774                                 break;
4775                 }
4776                 delalloc_bytes = percpu_counter_sum_positive(
4777                                                 &root->fs_info->delalloc_bytes);
4778         }
4779 }
4780
4781 /**
4782  * may_commit_transaction - possibly commit the transaction if it's OK to
4783  * @root - the root we're allocating for
      * @space_info - the space info we are checking against
4784  * @bytes - the number of bytes we want to reserve
4785  * @force - force the commit
4786  *
4787  * This will check to make sure that committing the transaction will actually
4788  * get us somewhere and then commit the transaction if it does.  Otherwise it
4789  * will return -ENOSPC.
4790  */
4791 static int may_commit_transaction(struct btrfs_root *root,
4792                                   struct btrfs_space_info *space_info,
4793                                   u64 bytes, int force)
4794 {
4795         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4796         struct btrfs_trans_handle *trans;
4797
4798         trans = (struct btrfs_trans_handle *)current->journal_info;
4799         if (trans)
4800                 return -EAGAIN;
4801
4802         if (force)
4803                 goto commit;
4804
4805         /* See if there is enough pinned space to make this reservation */
4806         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4807                                    bytes) >= 0)
4808                 goto commit;
4809
4810         /*
4811          * See if there is some space in the delayed insertion reservation for
4812          * this reservation.
4813          */
4814         if (space_info != delayed_rsv->space_info)
4815                 return -ENOSPC;
4816
4817         spin_lock(&delayed_rsv->lock);
4818         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4819                                    bytes - delayed_rsv->size) >= 0) {
4820                 spin_unlock(&delayed_rsv->lock);
4821                 return -ENOSPC;
4822         }
4823         spin_unlock(&delayed_rsv->lock);
4824
4825 commit:
4826         trans = btrfs_join_transaction(root);
4827         if (IS_ERR(trans))
4828                 return -ENOSPC;
4829
4830         return btrfs_commit_transaction(trans, root);
4831 }
4832
4833 struct reserve_ticket {
4834         u64 bytes;
4835         int error;
4836         struct list_head list;
4837         wait_queue_head_t wait;
4838 };
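/*
 * Ticket lifecycle in short (a sketch of the mechanism used below): a
 * reserver that cannot be satisfied immediately queues a ticket in
 * __reserve_metadata_bytes(), the flushers hand freed bytes to the
 * oldest tickets first (see space_info_add_old_bytes()), and the
 * reserver sleeps in wait_reserve_ticket() until ticket->bytes reaches
 * zero or ticket->error is set.
 */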
4839
4840 static int flush_space(struct btrfs_root *root,
4841                        struct btrfs_space_info *space_info, u64 num_bytes,
4842                        u64 orig_bytes, int state)
4843 {
4844         struct btrfs_trans_handle *trans;
4845         int nr;
4846         int ret = 0;
4847
4848         switch (state) {
4849         case FLUSH_DELAYED_ITEMS_NR:
4850         case FLUSH_DELAYED_ITEMS:
4851                 if (state == FLUSH_DELAYED_ITEMS_NR)
4852                         nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4853                 else
4854                         nr = -1;
4855
4856                 trans = btrfs_join_transaction(root);
4857                 if (IS_ERR(trans)) {
4858                         ret = PTR_ERR(trans);
4859                         break;
4860                 }
4861                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4862                 btrfs_end_transaction(trans, root);
4863                 break;
4864         case FLUSH_DELALLOC:
4865         case FLUSH_DELALLOC_WAIT:
4866                 shrink_delalloc(root, num_bytes * 2, orig_bytes,
4867                                 state == FLUSH_DELALLOC_WAIT);
4868                 break;
4869         case ALLOC_CHUNK:
4870                 trans = btrfs_join_transaction(root);
4871                 if (IS_ERR(trans)) {
4872                         ret = PTR_ERR(trans);
4873                         break;
4874                 }
4875                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4876                                      btrfs_get_alloc_profile(root, 0),
4877                                      CHUNK_ALLOC_NO_FORCE);
4878                 btrfs_end_transaction(trans, root);
4879                 if (ret > 0 || ret == -ENOSPC)
4880                         ret = 0;
4881                 break;
4882         case COMMIT_TRANS:
4883                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4884                 break;
4885         default:
4886                 ret = -ENOSPC;
4887                 break;
4888         }
4889
4890         trace_btrfs_flush_space(root->fs_info, space_info->flags, num_bytes,
4891                                 orig_bytes, state, ret);
4892         return ret;
4893 }
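/*
 * The states handled above escalate from cheapest to most expensive,
 * roughly, in flush_state order:
 *
 *	FLUSH_DELAYED_ITEMS_NR	run a bounded batch of delayed items
 *	FLUSH_DELAYED_ITEMS	run all delayed items
 *	FLUSH_DELALLOC		start delalloc writeback
 *	FLUSH_DELALLOC_WAIT	also wait for the ordered extents
 *	ALLOC_CHUNK		try to allocate a new chunk
 *	COMMIT_TRANS		commit the transaction as a last resort
 */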
4894
4895 static inline u64
4896 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4897                                  struct btrfs_space_info *space_info)
4898 {
4899         struct reserve_ticket *ticket;
4900         u64 used;
4901         u64 expected;
4902         u64 to_reclaim = 0;
4903
4904         list_for_each_entry(ticket, &space_info->tickets, list)
4905                 to_reclaim += ticket->bytes;
4906         list_for_each_entry(ticket, &space_info->priority_tickets, list)
4907                 to_reclaim += ticket->bytes;
4908         if (to_reclaim)
4909                 return to_reclaim;
4910
4911         to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
4912         if (can_overcommit(root, space_info, to_reclaim,
4913                            BTRFS_RESERVE_FLUSH_ALL))
4914                 return 0;
4915
4916         used = space_info->bytes_used + space_info->bytes_reserved +
4917                space_info->bytes_pinned + space_info->bytes_readonly +
4918                space_info->bytes_may_use;
4919         if (can_overcommit(root, space_info, SZ_1M, BTRFS_RESERVE_FLUSH_ALL))
4920                 expected = div_factor_fine(space_info->total_bytes, 95);
4921         else
4922                 expected = div_factor_fine(space_info->total_bytes, 90);
4923
4924         if (used > expected)
4925                 to_reclaim = used - expected;
4926         else
4927                 to_reclaim = 0;
4928         to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4929                                      space_info->bytes_reserved);
4930         return to_reclaim;
4931 }
4932
4933 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4934                                         struct btrfs_root *root, u64 used)
4935 {
4936         u64 thresh = div_factor_fine(space_info->total_bytes, 98);
4937
4938         /* If we're just plain full then async reclaim just slows us down. */
4939         if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
4940                 return 0;
4941
4942         if (!btrfs_calc_reclaim_metadata_size(root, space_info))
4943                 return 0;
4944
4945         return (used >= thresh && !btrfs_fs_closing(root->fs_info) &&
4946                 !test_bit(BTRFS_FS_STATE_REMOUNTING,
4947                           &root->fs_info->fs_state));
4948 }
4949
4950 static void wake_all_tickets(struct list_head *head)
4951 {
4952         struct reserve_ticket *ticket;
4953
4954         while (!list_empty(head)) {
4955                 ticket = list_first_entry(head, struct reserve_ticket, list);
4956                 list_del_init(&ticket->list);
4957                 ticket->error = -ENOSPC;
4958                 wake_up(&ticket->wait);
4959         }
4960 }
4961
4962 /*
4963  * This is for normal flushers; we can wait all goddamned day if we want to.  We
4964  * will loop and continuously try to flush as long as we are making progress.
4965  * We count progress as clearing off tickets each time we have to loop.
4966  */
4967 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4968 {
4969         struct btrfs_fs_info *fs_info;
4970         struct btrfs_space_info *space_info;
4971         u64 to_reclaim;
4972         int flush_state;
4973         int commit_cycles = 0;
4974         u64 last_tickets_id;
4975
4976         fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4977         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4978
4979         spin_lock(&space_info->lock);
4980         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4981                                                       space_info);
4982         if (!to_reclaim) {
4983                 space_info->flush = 0;
4984                 spin_unlock(&space_info->lock);
4985                 return;
4986         }
4987         last_tickets_id = space_info->tickets_id;
4988         spin_unlock(&space_info->lock);
4989
4990         flush_state = FLUSH_DELAYED_ITEMS_NR;
4991         do {
4992                 struct reserve_ticket *ticket;
4993                 int ret;
4994
4995                 ret = flush_space(fs_info->fs_root, space_info, to_reclaim,
4996                             to_reclaim, flush_state);
4997                 spin_lock(&space_info->lock);
4998                 if (list_empty(&space_info->tickets)) {
4999                         space_info->flush = 0;
5000                         spin_unlock(&space_info->lock);
5001                         return;
5002                 }
5003                 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
5004                                                               space_info);
5005                 ticket = list_first_entry(&space_info->tickets,
5006                                           struct reserve_ticket, list);
5007                 if (last_tickets_id == space_info->tickets_id) {
5008                         flush_state++;
5009                 } else {
5010                         last_tickets_id = space_info->tickets_id;
5011                         flush_state = FLUSH_DELAYED_ITEMS_NR;
5012                         if (commit_cycles)
5013                                 commit_cycles--;
5014                 }
5015
5016                 if (flush_state > COMMIT_TRANS) {
5017                         commit_cycles++;
5018                         if (commit_cycles > 2) {
5019                                 wake_all_tickets(&space_info->tickets);
5020                                 space_info->flush = 0;
5021                         } else {
5022                                 flush_state = FLUSH_DELAYED_ITEMS_NR;
5023                         }
5024                 }
5025                 spin_unlock(&space_info->lock);
5026         } while (flush_state <= COMMIT_TRANS);
5027 }
5028
5029 void btrfs_init_async_reclaim_work(struct work_struct *work)
5030 {
5031         INIT_WORK(work, btrfs_async_reclaim_metadata_space);
5032 }
5033
5034 static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
5035                                             struct btrfs_space_info *space_info,
5036                                             struct reserve_ticket *ticket)
5037 {
5038         u64 to_reclaim;
5039         int flush_state = FLUSH_DELAYED_ITEMS_NR;
5040
5041         spin_lock(&space_info->lock);
5042         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
5043                                                       space_info);
5044         if (!to_reclaim) {
5045                 spin_unlock(&space_info->lock);
5046                 return;
5047         }
5048         spin_unlock(&space_info->lock);
5049
5050         do {
5051                 flush_space(fs_info->fs_root, space_info, to_reclaim,
5052                             to_reclaim, flush_state);
5053                 flush_state++;
5054                 spin_lock(&space_info->lock);
5055                 if (ticket->bytes == 0) {
5056                         spin_unlock(&space_info->lock);
5057                         return;
5058                 }
5059                 spin_unlock(&space_info->lock);
5060
5061                 /*
5062                  * Priority flushers can't wait on delalloc without
5063                  * deadlocking.
5064                  */
5065                 if (flush_state == FLUSH_DELALLOC ||
5066                     flush_state == FLUSH_DELALLOC_WAIT)
5067                         flush_state = ALLOC_CHUNK;
5068         } while (flush_state < COMMIT_TRANS);
5069 }
5070
5071 static int wait_reserve_ticket(struct btrfs_fs_info *fs_info,
5072                                struct btrfs_space_info *space_info,
5073                                struct reserve_ticket *ticket, u64 orig_bytes)
5075 {
5076         DEFINE_WAIT(wait);
5077         int ret = 0;
5078
5079         spin_lock(&space_info->lock);
5080         while (ticket->bytes > 0 && ticket->error == 0) {
5081                 ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
5082                 if (ret) {
5083                         ret = -EINTR;
5084                         break;
5085                 }
5086                 spin_unlock(&space_info->lock);
5087
5088                 schedule();
5089
5090                 finish_wait(&ticket->wait, &wait);
5091                 spin_lock(&space_info->lock);
5092         }
5093         if (!ret)
5094                 ret = ticket->error;
5095         if (!list_empty(&ticket->list))
5096                 list_del_init(&ticket->list);
5097         if (ticket->bytes && ticket->bytes < orig_bytes) {
5098                 u64 num_bytes = orig_bytes - ticket->bytes;
5099                 space_info->bytes_may_use -= num_bytes;
5100                 trace_btrfs_space_reservation(fs_info, "space_info",
5101                                               space_info->flags, num_bytes, 0);
5102         }
5103         spin_unlock(&space_info->lock);
5104
5105         return ret;
5106 }
5107
5108 /**
5109  * __reserve_metadata_bytes - try to reserve bytes from the given space_info
5110  * @root - the root we're allocating for
5111  * @space_info - the space info we want to allocate from
5112  * @orig_bytes - the number of bytes we want
5113  * @flush - whether or not we can flush to make our reservation
5114  *
5115  * This will reserve orig_bytes number of bytes from the space info associated
5116  * with the block_rsv.  If there is not enough space it will make an attempt to
5117  * flush out space to make room.  It will do this by flushing delalloc if
5118  * possible or committing the transaction.  If flush is
5119  * BTRFS_RESERVE_NO_FLUSH then no attempts to regain reservations will be
5120  * made and this will fail if there is not enough space already.
5121  */
5122 static int __reserve_metadata_bytes(struct btrfs_root *root,
5123                                     struct btrfs_space_info *space_info,
5124                                     u64 orig_bytes,
5125                                     enum btrfs_reserve_flush_enum flush)
5126 {
5127         struct reserve_ticket ticket;
5128         u64 used;
5129         int ret = 0;
5130
5131         ASSERT(orig_bytes);
5132         ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);
5133
5134         spin_lock(&space_info->lock);
5135         ret = -ENOSPC;
5136         used = space_info->bytes_used + space_info->bytes_reserved +
5137                 space_info->bytes_pinned + space_info->bytes_readonly +
5138                 space_info->bytes_may_use;
5139
5140         /*
5141          * If we have enough space then hooray, make our reservation and carry
5142          * on.  If not see if we can overcommit, and if we can, hooray carry on.
5143          * If not things get more complicated.
5144          */
5145         if (used + orig_bytes <= space_info->total_bytes) {
5146                 space_info->bytes_may_use += orig_bytes;
5147                 trace_btrfs_space_reservation(root->fs_info, "space_info",
5148                                               space_info->flags, orig_bytes,
5149                                               1);
5150                 ret = 0;
5151         } else if (can_overcommit(root, space_info, orig_bytes, flush)) {
5152                 space_info->bytes_may_use += orig_bytes;
5153                 trace_btrfs_space_reservation(root->fs_info, "space_info",
5154                                               space_info->flags, orig_bytes,
5155                                               1);
5156                 ret = 0;
5157         }
5158
5159         /*
5160          * If we couldn't make a reservation then setup our reservation ticket
5161          * and kick the async worker if it's not already running.
5162          *
5163          * If we are a priority flusher then we just need to add our ticket to
5164          * the list and we will do our own flushing further down.
5165          */
5166         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
5167                 ticket.bytes = orig_bytes;
5168                 ticket.error = 0;
5169                 init_waitqueue_head(&ticket.wait);
5170                 if (flush == BTRFS_RESERVE_FLUSH_ALL) {
5171                         list_add_tail(&ticket.list, &space_info->tickets);
5172                         if (!space_info->flush) {
5173                                 space_info->flush = 1;
5174                                 trace_btrfs_trigger_flush(root->fs_info,
5175                                                           space_info->flags,
5176                                                           orig_bytes, flush,
5177                                                           "enospc");
5178                                 queue_work(system_unbound_wq,
5179                                            &root->fs_info->async_reclaim_work);
5180                         }
5181                 } else {
5182                         list_add_tail(&ticket.list,
5183                                       &space_info->priority_tickets);
5184                 }
5185         } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
5186                 used += orig_bytes;
5187                 /*
5188                  * We will do the space reservation dance during log replay,
5189                  * which means we won't have fs_info->fs_root set, so don't do
5190                  * the async reclaim as we will panic.
5191                  */
5192                 if (!root->fs_info->log_root_recovering &&
5193                     need_do_async_reclaim(space_info, root, used) &&
5194                     !work_busy(&root->fs_info->async_reclaim_work)) {
5195                         trace_btrfs_trigger_flush(root->fs_info,
5196                                                   space_info->flags,
5197                                                   orig_bytes, flush,
5198                                                   "preempt");
5199                         queue_work(system_unbound_wq,
5200                                    &root->fs_info->async_reclaim_work);
5201                 }
5202         }
5203         spin_unlock(&space_info->lock);
5204         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
5205                 return ret;
5206
5207         if (flush == BTRFS_RESERVE_FLUSH_ALL)
5208                 return wait_reserve_ticket(root->fs_info, space_info, &ticket,
5209                                            orig_bytes);
5210
5211         ret = 0;
5212         priority_reclaim_metadata_space(root->fs_info, space_info, &ticket);
5213         spin_lock(&space_info->lock);
5214         if (ticket.bytes) {
5215                 if (ticket.bytes < orig_bytes) {
5216                         u64 num_bytes = orig_bytes - ticket.bytes;
5217                         space_info->bytes_may_use -= num_bytes;
5218                         trace_btrfs_space_reservation(root->fs_info,
5219                                         "space_info", space_info->flags,
5220                                         num_bytes, 0);
5221
5222                 }
5223                 list_del_init(&ticket.list);
5224                 ret = -ENOSPC;
5225         }
5226         spin_unlock(&space_info->lock);
5227         ASSERT(list_empty(&ticket.list));
5228         return ret;
5229 }
5230
5231 /**
5232  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
5233  * @root - the root we're allocating for
5234  * @block_rsv - the block_rsv we're allocating for
5235  * @orig_bytes - the number of bytes we want
5236  * @flush - whether or not we can flush to make our reservation
5237  *
5238  * This will reserve orig_bytes number of bytes from the space info associated
5239  * with the block_rsv.  If there is not enough space it will make an attempt to
5240  * flush out space to make room.  It will do this by flushing delalloc if
5241  * possible or committing the transaction.  If flush is
5242  * BTRFS_RESERVE_NO_FLUSH then no attempts to regain reservations will be
5243  * made and this will fail if there is not enough space already.
5244  */
5245 static int reserve_metadata_bytes(struct btrfs_root *root,
5246                                   struct btrfs_block_rsv *block_rsv,
5247                                   u64 orig_bytes,
5248                                   enum btrfs_reserve_flush_enum flush)
5249 {
5250         int ret;
5251
5252         ret = __reserve_metadata_bytes(root, block_rsv->space_info, orig_bytes,
5253                                        flush);
5254         if (ret == -ENOSPC &&
5255             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
5256                 struct btrfs_block_rsv *global_rsv =
5257                         &root->fs_info->global_block_rsv;
5258
5259                 if (block_rsv != global_rsv &&
5260                     !block_rsv_use_bytes(global_rsv, orig_bytes))
5261                         ret = 0;
5262         }
5263         if (ret == -ENOSPC)
5264                 trace_btrfs_space_reservation(root->fs_info,
5265                                               "space_info:enospc",
5266                                               block_rsv->space_info->flags,
5267                                               orig_bytes, 1);
5268         return ret;
5269 }
5270
5271 static struct btrfs_block_rsv *get_block_rsv(
5272                                         const struct btrfs_trans_handle *trans,
5273                                         const struct btrfs_root *root)
5274 {
5275         struct btrfs_block_rsv *block_rsv = NULL;
5276
5277         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
5278             (root == root->fs_info->csum_root && trans->adding_csums) ||
5279              (root == root->fs_info->uuid_root))
5280                 block_rsv = trans->block_rsv;
5281
5282         if (!block_rsv)
5283                 block_rsv = root->block_rsv;
5284
5285         if (!block_rsv)
5286                 block_rsv = &root->fs_info->empty_block_rsv;
5287
5288         return block_rsv;
5289 }
5290
5291 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
5292                                u64 num_bytes)
5293 {
5294         int ret = -ENOSPC;
5295         spin_lock(&block_rsv->lock);
5296         if (block_rsv->reserved >= num_bytes) {
5297                 block_rsv->reserved -= num_bytes;
5298                 if (block_rsv->reserved < block_rsv->size)
5299                         block_rsv->full = 0;
5300                 ret = 0;
5301         }
5302         spin_unlock(&block_rsv->lock);
5303         return ret;
5304 }
5305
5306 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
5307                                 u64 num_bytes, int update_size)
5308 {
5309         spin_lock(&block_rsv->lock);
5310         block_rsv->reserved += num_bytes;
5311         if (update_size)
5312                 block_rsv->size += num_bytes;
5313         else if (block_rsv->reserved >= block_rsv->size)
5314                 block_rsv->full = 1;
5315         spin_unlock(&block_rsv->lock);
5316 }
5317
5318 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
5319                              struct btrfs_block_rsv *dest, u64 num_bytes,
5320                              int min_factor)
5321 {
5322         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5323         u64 min_bytes;
5324
5325         if (global_rsv->space_info != dest->space_info)
5326                 return -ENOSPC;
5327
5328         spin_lock(&global_rsv->lock);
5329         min_bytes = div_factor(global_rsv->size, min_factor);
5330         if (global_rsv->reserved < min_bytes + num_bytes) {
5331                 spin_unlock(&global_rsv->lock);
5332                 return -ENOSPC;
5333         }
5334         global_rsv->reserved -= num_bytes;
5335         if (global_rsv->reserved < global_rsv->size)
5336                 global_rsv->full = 0;
5337         spin_unlock(&global_rsv->lock);
5338
5339         block_rsv_add_bytes(dest, num_bytes, 1);
5340         return 0;
5341 }
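
/*
 * Worked example for the min_factor check above (illustrative numbers, not
 * from the original code): div_factor(size, factor) computes
 * size * factor / 10, so a caller passing min_factor == 5 asks that the
 * global rsv keep at least half of its size after the migration.  With
 * global_rsv->size == 4M, min_factor == 5 and num_bytes == 1M, the
 * migration only succeeds while global_rsv->reserved >= 2M + 1M.
 */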
5342
5343 /*
5344  * This is for space we already have accounted in space_info->bytes_may_use, so
5345  * basically when we're returning space from block_rsv's.
5346  */
5347 static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
5348                                      struct btrfs_space_info *space_info,
5349                                      u64 num_bytes)
5350 {
5351         struct reserve_ticket *ticket;
5352         struct list_head *head;
5353         u64 used;
5354         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;
5355         bool check_overcommit = false;
5356
5357         spin_lock(&space_info->lock);
5358         head = &space_info->priority_tickets;
5359
5360         /*
5361          * If we are over our limit then we need to check and see if we can
5362          * overcommit, and if we can't then we just need to free up our space
5363          * and not satisfy any requests.
5364          */
5365         used = space_info->bytes_used + space_info->bytes_reserved +
5366                 space_info->bytes_pinned + space_info->bytes_readonly +
5367                 space_info->bytes_may_use;
5368         if (used - num_bytes >= space_info->total_bytes)
5369                 check_overcommit = true;
5370 again:
5371         while (!list_empty(head) && num_bytes) {
5372                 ticket = list_first_entry(head, struct reserve_ticket,
5373                                           list);
5374                 /*
5375                  * We use 0 bytes because this space is already reserved, so
5376                  * adding the ticket space would be a double count.
5377                  */
5378                 if (check_overcommit &&
5379                     !can_overcommit(fs_info->extent_root, space_info, 0,
5380                                     flush))
5381                         break;
5382                 if (num_bytes >= ticket->bytes) {
5383                         list_del_init(&ticket->list);
5384                         num_bytes -= ticket->bytes;
5385                         ticket->bytes = 0;
5386                         space_info->tickets_id++;
5387                         wake_up(&ticket->wait);
5388                 } else {
5389                         ticket->bytes -= num_bytes;
5390                         num_bytes = 0;
5391                 }
5392         }
5393
5394         if (num_bytes && head == &space_info->priority_tickets) {
5395                 head = &space_info->tickets;
5396                 flush = BTRFS_RESERVE_FLUSH_ALL;
5397                 goto again;
5398         }
5399         space_info->bytes_may_use -= num_bytes;
5400         trace_btrfs_space_reservation(fs_info, "space_info",
5401                                       space_info->flags, num_bytes, 0);
5402         spin_unlock(&space_info->lock);
5403 }
5404
5405 /*
5406  * This is for newly allocated space that isn't accounted in
5407  * space_info->bytes_may_use yet.  So if we allocate a chunk or unpin an extent
5408  * we use this helper.
5409  */
5410 static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
5411                                      struct btrfs_space_info *space_info,
5412                                      u64 num_bytes)
5413 {
5414         struct reserve_ticket *ticket;
5415         struct list_head *head = &space_info->priority_tickets;
5416
5417 again:
5418         while (!list_empty(head) && num_bytes) {
5419                 ticket = list_first_entry(head, struct reserve_ticket,
5420                                           list);
5421                 if (num_bytes >= ticket->bytes) {
5422                         trace_btrfs_space_reservation(fs_info, "space_info",
5423                                                       space_info->flags,
5424                                                       ticket->bytes, 1);
5425                         list_del_init(&ticket->list);
5426                         num_bytes -= ticket->bytes;
5427                         space_info->bytes_may_use += ticket->bytes;
5428                         ticket->bytes = 0;
5429                         space_info->tickets_id++;
5430                         wake_up(&ticket->wait);
5431                 } else {
5432                         trace_btrfs_space_reservation(fs_info, "space_info",
5433                                                       space_info->flags,
5434                                                       num_bytes, 1);
5435                         space_info->bytes_may_use += num_bytes;
5436                         ticket->bytes -= num_bytes;
5437                         num_bytes = 0;
5438                 }
5439         }
5440
5441         if (num_bytes && head == &space_info->priority_tickets) {
5442                 head = &space_info->tickets;
5443                 goto again;
5444         }
5445 }
5446
5447 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
5448                                     struct btrfs_block_rsv *block_rsv,
5449                                     struct btrfs_block_rsv *dest, u64 num_bytes)
5450 {
5451         struct btrfs_space_info *space_info = block_rsv->space_info;
5452
5453         spin_lock(&block_rsv->lock);
5454         if (num_bytes == (u64)-1)
5455                 num_bytes = block_rsv->size;
5456         block_rsv->size -= num_bytes;
5457         if (block_rsv->reserved >= block_rsv->size) {
5458                 num_bytes = block_rsv->reserved - block_rsv->size;
5459                 block_rsv->reserved = block_rsv->size;
5460                 block_rsv->full = 1;
5461         } else {
5462                 num_bytes = 0;
5463         }
5464         spin_unlock(&block_rsv->lock);
5465
5466         if (num_bytes > 0) {
5467                 if (dest) {
5468                         spin_lock(&dest->lock);
5469                         if (!dest->full) {
5470                                 u64 bytes_to_add;
5471
5472                                 bytes_to_add = dest->size - dest->reserved;
5473                                 bytes_to_add = min(num_bytes, bytes_to_add);
5474                                 dest->reserved += bytes_to_add;
5475                                 if (dest->reserved >= dest->size)
5476                                         dest->full = 1;
5477                                 num_bytes -= bytes_to_add;
5478                         }
5479                         spin_unlock(&dest->lock);
5480                 }
5481                 if (num_bytes)
5482                         space_info_add_old_bytes(fs_info, space_info,
5483                                                  num_bytes);
5484         }
5485 }
5486
5487 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
5488                             struct btrfs_block_rsv *dst, u64 num_bytes,
5489                             int update_size)
5490 {
5491         int ret;
5492
5493         ret = block_rsv_use_bytes(src, num_bytes);
5494         if (ret)
5495                 return ret;
5496
5497         block_rsv_add_bytes(dst, num_bytes, update_size);
5498         return 0;
5499 }
5500
5501 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
5502 {
5503         memset(rsv, 0, sizeof(*rsv));
5504         spin_lock_init(&rsv->lock);
5505         rsv->type = type;
5506 }
5507
5508 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
5509                                               unsigned short type)
5510 {
5511         struct btrfs_block_rsv *block_rsv;
5512         struct btrfs_fs_info *fs_info = root->fs_info;
5513
5514         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
5515         if (!block_rsv)
5516                 return NULL;
5517
5518         btrfs_init_block_rsv(block_rsv, type);
5519         block_rsv->space_info = __find_space_info(fs_info,
5520                                                   BTRFS_BLOCK_GROUP_METADATA);
5521         return block_rsv;
5522 }
5523
5524 void btrfs_free_block_rsv(struct btrfs_root *root,
5525                           struct btrfs_block_rsv *rsv)
5526 {
5527         if (!rsv)
5528                 return;
5529         btrfs_block_rsv_release(root, rsv, (u64)-1);
5530         kfree(rsv);
5531 }
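
/*
 * Illustrative sketch (not called anywhere in this file): the typical
 * lifecycle of a short-lived block_rsv built from the helpers above.  The
 * single-item size is an example value.
 *
 *	struct btrfs_block_rsv *rsv;
 *	u64 bytes = btrfs_calc_trans_metadata_size(root, 1);
 *	int ret;
 *
 *	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
 *	if (!rsv)
 *		return -ENOMEM;
 *	ret = btrfs_block_rsv_add(root, rsv, bytes, BTRFS_RESERVE_FLUSH_ALL);
 *	if (ret) {
 *		btrfs_free_block_rsv(root, rsv);
 *		return ret;
 *	}
 *	... consume the reservation ...
 *	btrfs_free_block_rsv(root, rsv);  (also releases any leftover bytes)
 */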
5532
5533 void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
5534 {
5535         kfree(rsv);
5536 }
5537
5538 int btrfs_block_rsv_add(struct btrfs_root *root,
5539                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
5540                         enum btrfs_reserve_flush_enum flush)
5541 {
5542         int ret;
5543
5544         if (num_bytes == 0)
5545                 return 0;
5546
5547         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5548         if (!ret) {
5549                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
5550                 return 0;
5551         }
5552
5553         return ret;
5554 }
5555
5556 int btrfs_block_rsv_check(struct btrfs_root *root,
5557                           struct btrfs_block_rsv *block_rsv, int min_factor)
5558 {
5559         u64 num_bytes = 0;
5560         int ret = -ENOSPC;
5561
5562         if (!block_rsv)
5563                 return 0;
5564
5565         spin_lock(&block_rsv->lock);
5566         num_bytes = div_factor(block_rsv->size, min_factor);
5567         if (block_rsv->reserved >= num_bytes)
5568                 ret = 0;
5569         spin_unlock(&block_rsv->lock);
5570
5571         return ret;
5572 }
5573
5574 int btrfs_block_rsv_refill(struct btrfs_root *root,
5575                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
5576                            enum btrfs_reserve_flush_enum flush)
5577 {
5578         u64 num_bytes = 0;
5579         int ret = -ENOSPC;
5580
5581         if (!block_rsv)
5582                 return 0;
5583
5584         spin_lock(&block_rsv->lock);
5585         num_bytes = min_reserved;
5586         if (block_rsv->reserved >= num_bytes)
5587                 ret = 0;
5588         else
5589                 num_bytes -= block_rsv->reserved;
5590         spin_unlock(&block_rsv->lock);
5591
5592         if (!ret)
5593                 return 0;
5594
5595         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5596         if (!ret) {
5597                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
5598                 return 0;
5599         }
5600
5601         return ret;
5602 }
5603
5604 void btrfs_block_rsv_release(struct btrfs_root *root,
5605                              struct btrfs_block_rsv *block_rsv,
5606                              u64 num_bytes)
5607 {
5608         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5609         if (global_rsv == block_rsv ||
5610             block_rsv->space_info != global_rsv->space_info)
5611                 global_rsv = NULL;
5612         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
5613                                 num_bytes);
5614 }
5615
5616 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
5617 {
5618         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
5619         struct btrfs_space_info *sinfo = block_rsv->space_info;
5620         u64 num_bytes;
5621
5622         /*
5623          * The global block rsv is based on the size of the extent tree, the
5624          * checksum tree and the root tree.  If the fs is empty we want to set
5625          * it to a minimal amount for safety.
5626          */
5627         num_bytes = btrfs_root_used(&fs_info->extent_root->root_item) +
5628                 btrfs_root_used(&fs_info->csum_root->root_item) +
5629                 btrfs_root_used(&fs_info->tree_root->root_item);
5630         num_bytes = max_t(u64, num_bytes, SZ_16M);
5631
5632         spin_lock(&sinfo->lock);
5633         spin_lock(&block_rsv->lock);
5634
5635         block_rsv->size = min_t(u64, num_bytes, SZ_512M);
5636
5637         if (block_rsv->reserved < block_rsv->size) {
5638                 num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
5639                         sinfo->bytes_reserved + sinfo->bytes_readonly +
5640                         sinfo->bytes_may_use;
5641                 if (sinfo->total_bytes > num_bytes) {
5642                         num_bytes = sinfo->total_bytes - num_bytes;
5643                         num_bytes = min(num_bytes,
5644                                         block_rsv->size - block_rsv->reserved);
5645                         block_rsv->reserved += num_bytes;
5646                         sinfo->bytes_may_use += num_bytes;
5647                         trace_btrfs_space_reservation(fs_info, "space_info",
5648                                                       sinfo->flags, num_bytes,
5649                                                       1);
5650                 }
5651         } else if (block_rsv->reserved > block_rsv->size) {
5652                 num_bytes = block_rsv->reserved - block_rsv->size;
5653                 sinfo->bytes_may_use -= num_bytes;
5654                 trace_btrfs_space_reservation(fs_info, "space_info",
5655                                       sinfo->flags, num_bytes, 0);
5656                 block_rsv->reserved = block_rsv->size;
5657         }
5658
5659         if (block_rsv->reserved == block_rsv->size)
5660                 block_rsv->full = 1;
5661         else
5662                 block_rsv->full = 0;
5663
5664         spin_unlock(&block_rsv->lock);
5665         spin_unlock(&sinfo->lock);
5666 }
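
/*
 * Sizing example for update_global_block_rsv() (illustrative numbers): with
 * 100M used across the extent, csum and root trees the target size becomes
 * min(100M, 512M) = 100M.  On a nearly empty fs where the three roots only
 * use 1M, the max_t() above lifts the target to SZ_16M first, so the global
 * rsv never shrinks below 16M.
 */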
5667
5668 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
5669 {
5670         struct btrfs_space_info *space_info;
5671
5672         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
5673         fs_info->chunk_block_rsv.space_info = space_info;
5674
5675         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5676         fs_info->global_block_rsv.space_info = space_info;
5677         fs_info->delalloc_block_rsv.space_info = space_info;
5678         fs_info->trans_block_rsv.space_info = space_info;
5679         fs_info->empty_block_rsv.space_info = space_info;
5680         fs_info->delayed_block_rsv.space_info = space_info;
5681
5682         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
5683         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
5684         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
5685         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
5686         if (fs_info->quota_root)
5687                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
5688         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
5689
5690         update_global_block_rsv(fs_info);
5691 }
5692
5693 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
5694 {
5695         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
5696                                 (u64)-1);
5697         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
5698         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
5699         WARN_ON(fs_info->trans_block_rsv.size > 0);
5700         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
5701         WARN_ON(fs_info->chunk_block_rsv.size > 0);
5702         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
5703         WARN_ON(fs_info->delayed_block_rsv.size > 0);
5704         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
5705 }
5706
5707 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
5708                                   struct btrfs_root *root)
5709 {
5710         if (!trans->block_rsv)
5711                 return;
5712
5713         if (!trans->bytes_reserved)
5714                 return;
5715
5716         trace_btrfs_space_reservation(root->fs_info, "transaction",
5717                                       trans->transid, trans->bytes_reserved, 0);
5718         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
5719         trans->bytes_reserved = 0;
5720 }
5721
5722 /*
5723  * To be called after all the new block groups attached to the transaction
5724  * handle have been created (btrfs_create_pending_block_groups()).
5725  */
5726 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
5727 {
5728         struct btrfs_fs_info *fs_info = trans->fs_info;
5729
5730         if (!trans->chunk_bytes_reserved)
5731                 return;
5732
5733         WARN_ON_ONCE(!list_empty(&trans->new_bgs));
5734
5735         block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
5736                                 trans->chunk_bytes_reserved);
5737         trans->chunk_bytes_reserved = 0;
5738 }
5739
5740 /* Can only return 0 or -ENOSPC */
5741 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
5742                                   struct inode *inode)
5743 {
5744         struct btrfs_root *root = BTRFS_I(inode)->root;
5745         /*
5746          * We always use trans->block_rsv here as we will have reserved space
5747          * for our orphan when starting the transaction.  Using get_block_rsv()
5748          * here would sometimes make us choose the wrong block rsv, as we could
5749          * be doing a reloc inode for a non-refcounted root.
5750          */
5751         struct btrfs_block_rsv *src_rsv = trans->block_rsv;
5752         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
5753
5754         /*
5755          * We need to hold space in order to delete our orphan item once we've
5756          * added it, so this takes the reservation so we can release it later
5757          * when we are truly done with the orphan item.
5758          */
5759         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5760         trace_btrfs_space_reservation(root->fs_info, "orphan",
5761                                       btrfs_ino(inode), num_bytes, 1);
5762         return btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
5763 }
5764
5765 void btrfs_orphan_release_metadata(struct inode *inode)
5766 {
5767         struct btrfs_root *root = BTRFS_I(inode)->root;
5768         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5769         trace_btrfs_space_reservation(root->fs_info, "orphan",
5770                                       btrfs_ino(inode), num_bytes, 0);
5771         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
5772 }
5773
5774 /*
5775  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
5776  * root: the root of the parent directory
5777  * rsv: block reservation
5778  * items: the number of items we need to reserve
5779  * qgroup_reserved: used to return the reserved qgroup size
5780  *
5781  * This function is used to reserve space for snapshot/subvolume
5782  * creation and deletion. Those operations differ from the common
5783  * file/directory operations: they change two fs/file trees and the
5784  * root tree, and the number of items the qgroup reserves differs
5785  * from the free space reservation. So we cannot use the space
5786  * reservation mechanism in start_transaction().
5787  */
5788 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
5789                                      struct btrfs_block_rsv *rsv,
5790                                      int items,
5791                                      u64 *qgroup_reserved,
5792                                      bool use_global_rsv)
5793 {
5794         u64 num_bytes;
5795         int ret;
5796         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5797
5798         if (root->fs_info->quota_enabled) {
5799                 /* One for parent inode, two for dir entries */
5800                 num_bytes = 3 * root->nodesize;
5801                 ret = btrfs_qgroup_reserve_meta(root, num_bytes);
5802                 if (ret)
5803                         return ret;
5804         } else {
5805                 num_bytes = 0;
5806         }
5807
5808         *qgroup_reserved = num_bytes;
5809
5810         num_bytes = btrfs_calc_trans_metadata_size(root, items);
5811         rsv->space_info = __find_space_info(root->fs_info,
5812                                             BTRFS_BLOCK_GROUP_METADATA);
5813         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
5814                                   BTRFS_RESERVE_FLUSH_ALL);
5815
5816         if (ret == -ENOSPC && use_global_rsv)
5817                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1);
5818
5819         if (ret && *qgroup_reserved)
5820                 btrfs_qgroup_free_meta(root, *qgroup_reserved);
5821
5822         return ret;
5823 }
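
/*
 * Qgroup arithmetic example for the function above (illustrative only,
 * assuming the common 16K nodesize): the "one for parent inode, two for
 * dir entries" reservation is 3 * 16K = 48K of qgroup metadata, while the
 * block rsv itself is sized by btrfs_calc_trans_metadata_size(root, items).
 */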
5824
5825 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
5826                                       struct btrfs_block_rsv *rsv,
5827                                       u64 qgroup_reserved)
5828 {
5829         btrfs_block_rsv_release(root, rsv, (u64)-1);
5830 }
5831
5832 /**
5833  * drop_outstanding_extent - drop an outstanding extent
5834  * @inode: the inode we're dropping the extent for
5835  * @num_bytes: the number of bytes we're releasing.
5836  *
5837  * This is called when we are freeing up an outstanding extent, either called
5838  * after an error or after an extent is written.  This will return the number of
5839  * reserved extents that need to be freed.  This must be called with
5840  * BTRFS_I(inode)->lock held.
5841  */
5842 static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
5843 {
5844         unsigned drop_inode_space = 0;
5845         unsigned dropped_extents = 0;
5846         unsigned num_extents = 0;
5847
5848         num_extents = (unsigned)div64_u64(num_bytes +
5849                                           BTRFS_MAX_EXTENT_SIZE - 1,
5850                                           BTRFS_MAX_EXTENT_SIZE);
5851         ASSERT(num_extents);
5852         ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
5853         BTRFS_I(inode)->outstanding_extents -= num_extents;
5854
5855         if (BTRFS_I(inode)->outstanding_extents == 0 &&
5856             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5857                                &BTRFS_I(inode)->runtime_flags))
5858                 drop_inode_space = 1;
5859
5860         /*
5861          * If we have at least as many outstanding extents as reserved extents
5862          * then we need to leave the reserved extents count alone.
5863          */
5864         if (BTRFS_I(inode)->outstanding_extents >=
5865             BTRFS_I(inode)->reserved_extents)
5866                 return drop_inode_space;
5867
5868         dropped_extents = BTRFS_I(inode)->reserved_extents -
5869                 BTRFS_I(inode)->outstanding_extents;
5870         BTRFS_I(inode)->reserved_extents -= dropped_extents;
5871         return dropped_extents + drop_inode_space;
5872 }
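
/*
 * Extent-count example for the round-up division above (illustrative):
 * BTRFS_MAX_EXTENT_SIZE is 128M, so releasing num_bytes == 128M drops one
 * outstanding extent and num_bytes == 128M + 1 drops two, matching how the
 * extents were counted when the reservation was taken.
 */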
5873
5874 /**
5875  * calc_csum_metadata_size - return the amount of metadata space that must be
5876  *      reserved/freed for the given bytes.
5877  * @inode: the inode we're manipulating
5878  * @num_bytes: the number of bytes in question
5879  * @reserve: 1 if we are reserving space, 0 if we are freeing space
5880  *
5881  * This adjusts the number of csum_bytes in the inode and then returns the
5882  * correct amount of metadata that must either be reserved or freed.  We
5883  * calculate how many checksums we can fit into one leaf and then divide the
5884  * number of bytes that will need to be checksummed by this value to figure out
5885  * how many checksums will be required.  If we are adding bytes then the number
5886  * may go up and we will return the number of additional bytes that must be
5887  * reserved.  If it is going down we will return the number of bytes that must
5888  * be freed.
5889  *
5890  * This must be called with BTRFS_I(inode)->lock held.
5891  */
5892 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
5893                                    int reserve)
5894 {
5895         struct btrfs_root *root = BTRFS_I(inode)->root;
5896         u64 old_csums, num_csums;
5897
5898         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
5899             BTRFS_I(inode)->csum_bytes == 0)
5900                 return 0;
5901
5902         old_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5903         if (reserve)
5904                 BTRFS_I(inode)->csum_bytes += num_bytes;
5905         else
5906                 BTRFS_I(inode)->csum_bytes -= num_bytes;
5907         num_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5908
5909         /* No change, no need to reserve more */
5910         if (old_csums == num_csums)
5911                 return 0;
5912
5913         if (reserve)
5914                 return btrfs_calc_trans_metadata_size(root,
5915                                                       num_csums - old_csums);
5916
5917         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
5918 }
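
/*
 * Example of the leaf-granular behaviour above (illustrative): if the
 * inode's csum_bytes currently needs 2 leaves of checksums and the new
 * total after adding num_bytes still fits in 2 leaves, old_csums ==
 * num_csums and nothing extra is reserved; only when the addition spills
 * into a 3rd leaf do we reserve btrfs_calc_trans_metadata_size(root, 1)
 * more bytes.
 */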
5919
5920 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5921 {
5922         struct btrfs_root *root = BTRFS_I(inode)->root;
5923         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
5924         u64 to_reserve = 0;
5925         u64 csum_bytes;
5926         unsigned nr_extents = 0;
5927         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5928         int ret = 0;
5929         bool delalloc_lock = true;
5930         u64 to_free = 0;
5931         unsigned dropped;
5932         bool release_extra = false;
5933
5934         /* If we are a free space inode we need to not flush since we will be in
5935          * the middle of a transaction commit.  We also don't need the delalloc
5936          * mutex since we won't race with anybody.  We need this mostly to make
5937          * lockdep shut its filthy mouth.
5938          *
5939          * If we have a transaction open (can happen if we call truncate_block
5940          * from truncate), then we need FLUSH_LIMIT so we don't deadlock.
5941          */
5942         if (btrfs_is_free_space_inode(inode)) {
5943                 flush = BTRFS_RESERVE_NO_FLUSH;
5944                 delalloc_lock = false;
5945         } else if (current->journal_info) {
5946                 flush = BTRFS_RESERVE_FLUSH_LIMIT;
5947         }
5948
5949         if (flush != BTRFS_RESERVE_NO_FLUSH &&
5950             btrfs_transaction_in_commit(root->fs_info))
5951                 schedule_timeout(1);
5952
5953         if (delalloc_lock)
5954                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5955
5956         num_bytes = ALIGN(num_bytes, root->sectorsize);
5957
5958         spin_lock(&BTRFS_I(inode)->lock);
5959         nr_extents = (unsigned)div64_u64(num_bytes +
5960                                          BTRFS_MAX_EXTENT_SIZE - 1,
5961                                          BTRFS_MAX_EXTENT_SIZE);
5962         BTRFS_I(inode)->outstanding_extents += nr_extents;
5963
5964         nr_extents = 0;
5965         if (BTRFS_I(inode)->outstanding_extents >
5966             BTRFS_I(inode)->reserved_extents)
5967                 nr_extents += BTRFS_I(inode)->outstanding_extents -
5968                         BTRFS_I(inode)->reserved_extents;
5969
5970         /* We always want to reserve a slot for updating the inode. */
5971         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents + 1);
5972         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5973         csum_bytes = BTRFS_I(inode)->csum_bytes;
5974         spin_unlock(&BTRFS_I(inode)->lock);
5975
5976         if (root->fs_info->quota_enabled) {
5977                 ret = btrfs_qgroup_reserve_meta(root,
5978                                 nr_extents * root->nodesize);
5979                 if (ret)
5980                         goto out_fail;
5981         }
5982
5983         ret = btrfs_block_rsv_add(root, block_rsv, to_reserve, flush);
5984         if (unlikely(ret)) {
5985                 btrfs_qgroup_free_meta(root, nr_extents * root->nodesize);
5986                 goto out_fail;
5987         }
5988
5989         spin_lock(&BTRFS_I(inode)->lock);
5990         if (test_and_set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5991                              &BTRFS_I(inode)->runtime_flags)) {
5992                 to_reserve -= btrfs_calc_trans_metadata_size(root, 1);
5993                 release_extra = true;
5994         }
5995         BTRFS_I(inode)->reserved_extents += nr_extents;
5996         spin_unlock(&BTRFS_I(inode)->lock);
5997
5998         if (delalloc_lock)
5999                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
6000
6001         if (to_reserve)
6002                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
6003                                               btrfs_ino(inode), to_reserve, 1);
6004         if (release_extra)
6005                 btrfs_block_rsv_release(root, block_rsv,
6006                                         btrfs_calc_trans_metadata_size(root,
6007                                                                        1));
6008         return 0;
6009
6010 out_fail:
6011         spin_lock(&BTRFS_I(inode)->lock);
6012         dropped = drop_outstanding_extent(inode, num_bytes);
6013         /*
6014          * If the inode's csum_bytes is the same as the original
6015          * csum_bytes then we know we haven't raced with any free()ers
6016          * so we can just reduce our inode's csum bytes and carry on.
6017          */
6018         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
6019                 calc_csum_metadata_size(inode, num_bytes, 0);
6020         } else {
6021                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
6022                 u64 bytes;
6023
6024                 /*
6025                  * This is tricky, but first we need to figure out how much we
6026                  * freed by any free()ers that ran during this
6027                  * reservation, so we reset ->csum_bytes to the csum_bytes
6028                  * before we dropped our lock, and then call the free for the
6029                  * number of bytes that were freed while we were trying our
6030                  * reservation.
6031                  */
6032                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
6033                 BTRFS_I(inode)->csum_bytes = csum_bytes;
6034                 to_free = calc_csum_metadata_size(inode, bytes, 0);
6035
6037                 /*
6038                  * Now we need to see how much we would have freed had we not
6039                  * been making this reservation and our ->csum_bytes were not
6040                  * artificially inflated.
6041                  */
6042                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
6043                 bytes = csum_bytes - orig_csum_bytes;
6044                 bytes = calc_csum_metadata_size(inode, bytes, 0);
6045
6046                 /*
6047                  * Now reset ->csum_bytes to what it should be.  If bytes is
6048                  * more than to_free then we would have freed more space had we
6049                  * not had an artificially high ->csum_bytes, so we need to free
6050                  * the remainder.  If bytes is the same or less then we don't
6051                  * need to do anything, the other free-ers did the correct
6052                  * thing.
6053                  */
6054                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
6055                 if (bytes > to_free)
6056                         to_free = bytes - to_free;
6057                 else
6058                         to_free = 0;
6059         }
6060         spin_unlock(&BTRFS_I(inode)->lock);
6061         if (dropped)
6062                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
6063
6064         if (to_free) {
6065                 btrfs_block_rsv_release(root, block_rsv, to_free);
6066                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
6067                                               btrfs_ino(inode), to_free, 0);
6068         }
6069         if (delalloc_lock)
6070                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
6071         return ret;
6072 }
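
/*
 * Worked example for the out_fail csum dance above (illustrative numbers):
 * suppose our failed reservation added num_bytes == 1M, the csum_bytes
 * snapshot was 5M (including our 1M) and racing free()ers dropped the
 * inode's csum_bytes to 3M while we were reserving.  Step one frees the
 * metadata for the 2M the racers released, computed against the inflated
 * 5M value; step two recomputes that same 2M free against the deflated
 * 4M value (5M - 1M, as if our reservation never happened).  If the second
 * number is bigger, the difference is metadata that only our artificially
 * high csum_bytes kept alive, so we release it here.
 */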
6073
6074 /**
6075  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
6076  * @inode: the inode to release the reservation for
6077  * @num_bytes: the number of bytes we're releasing
6078  *
6079  * This will release the metadata reservation for an inode.  This can be called
6080  * once we complete IO for a given set of bytes to release their metadata
6081  * reservations.
6082  */
6083 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
6084 {
6085         struct btrfs_root *root = BTRFS_I(inode)->root;
6086         u64 to_free = 0;
6087         unsigned dropped;
6088
6089         num_bytes = ALIGN(num_bytes, root->sectorsize);
6090         spin_lock(&BTRFS_I(inode)->lock);
6091         dropped = drop_outstanding_extent(inode, num_bytes);
6092
6093         if (num_bytes)
6094                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
6095         spin_unlock(&BTRFS_I(inode)->lock);
6096         if (dropped > 0)
6097                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
6098
6099         if (btrfs_is_testing(root->fs_info))
6100                 return;
6101
6102         trace_btrfs_space_reservation(root->fs_info, "delalloc",
6103                                       btrfs_ino(inode), to_free, 0);
6104
6105         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
6106                                 to_free);
6107 }
6108
6109 /**
6110  * btrfs_delalloc_reserve_space - reserve data and metadata space for
6111  * delalloc
6112  * @inode: inode we're writing to
6113  * @start: the start of the range we are writing to
6114  * @len: the length of the range we are writing
6115  *
6118  * This will do the following things
6119  *
6120  * o reserve space in data space info for num bytes
6121  *   and reserve precious corresponding qgroup space
6122  *   (Done in check_data_free_space)
6123  *
6124  * o reserve space for metadata space, based on the number of outstanding
6125  *   extents and how much csums will be needed
6126  *   also reserve metadata space in a per root over-reserve method.
6127  * o add to the inode's delalloc_bytes
6128  * o add it to the fs_info's delalloc inodes list.
6129  *   (Above 3 all done in delalloc_reserve_metadata)
6130  *
6131  * Return 0 for success
6132  * Return <0 for error (-ENOSPC or -EDQUOT)
6133  */
6134 int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len)
6135 {
6136         int ret;
6137
6138         ret = btrfs_check_data_free_space(inode, start, len);
6139         if (ret < 0)
6140                 return ret;
6141         ret = btrfs_delalloc_reserve_metadata(inode, len);
6142         if (ret < 0)
6143                 btrfs_free_reserved_data_space(inode, start, len);
6144         return ret;
6145 }
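
/*
 * Illustrative pairing for the helper above (not from this file): a write
 * path reserves before dirtying pages and gives everything back on failure.
 * 'pos', 'count' and do_the_write() are hypothetical stand-ins for whatever
 * range and work the caller has.
 *
 *	ret = btrfs_delalloc_reserve_space(inode, pos, count);
 *	if (ret)
 *		return ret;
 *	ret = do_the_write(...);
 *	if (ret)
 *		btrfs_delalloc_release_space(inode, pos, count);
 */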
6146
6147 /**
6148  * btrfs_delalloc_release_space - release data and metadata space for delalloc
6149  * @inode: inode we're releasing space for
6150  * @start: start position of the space already reserved
6151  * @len: the len of the space already reserved
6152  *
6153  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
6154  * called in the case that we don't need the metadata AND data reservations
6155  * anymore, e.g. after an error or when we insert an inline extent.
6156  *
6157  * This function will release the metadata space that was not used and will
6158  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
6159  * list if there are no delalloc bytes left.
6160  * Also it will handle the qgroup reserved space.
6161  */
6162 void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len)
6163 {
6164         btrfs_delalloc_release_metadata(inode, len);
6165         btrfs_free_reserved_data_space(inode, start, len);
6166 }
6167
6168 static int update_block_group(struct btrfs_trans_handle *trans,
6169                               struct btrfs_root *root, u64 bytenr,
6170                               u64 num_bytes, int alloc)
6171 {
6172         struct btrfs_block_group_cache *cache = NULL;
6173         struct btrfs_fs_info *info = root->fs_info;
6174         u64 total = num_bytes;
6175         u64 old_val;
6176         u64 byte_in_group;
6177         int factor;
6178
6179         /* block accounting for super block */
6180         spin_lock(&info->delalloc_root_lock);
6181         old_val = btrfs_super_bytes_used(info->super_copy);
6182         if (alloc)
6183                 old_val += num_bytes;
6184         else
6185                 old_val -= num_bytes;
6186         btrfs_set_super_bytes_used(info->super_copy, old_val);
6187         spin_unlock(&info->delalloc_root_lock);
6188
6189         while (total) {
6190                 cache = btrfs_lookup_block_group(info, bytenr);
6191                 if (!cache)
6192                         return -ENOENT;
6193                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
6194                                     BTRFS_BLOCK_GROUP_RAID1 |
6195                                     BTRFS_BLOCK_GROUP_RAID10))
6196                         factor = 2;
6197                 else
6198                         factor = 1;
6199                 /*
6200                  * If this block group has free space cache written out, we
6201                  * need to make sure to load it if we are removing space.  This
6202                  * is because we need the unpinning stage to actually add the
6203                  * space back to the block group, otherwise we will leak space.
6204                  */
6205                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
6206                         cache_block_group(cache, 1);
6207
6208                 byte_in_group = bytenr - cache->key.objectid;
6209                 WARN_ON(byte_in_group > cache->key.offset);
6210
6211                 spin_lock(&cache->space_info->lock);
6212                 spin_lock(&cache->lock);
6213
6214                 if (btrfs_test_opt(root->fs_info, SPACE_CACHE) &&
6215                     cache->disk_cache_state < BTRFS_DC_CLEAR)
6216                         cache->disk_cache_state = BTRFS_DC_CLEAR;
6217
6218                 old_val = btrfs_block_group_used(&cache->item);
6219                 num_bytes = min(total, cache->key.offset - byte_in_group);
6220                 if (alloc) {
6221                         old_val += num_bytes;
6222                         btrfs_set_block_group_used(&cache->item, old_val);
6223                         cache->reserved -= num_bytes;
6224                         cache->space_info->bytes_reserved -= num_bytes;
6225                         cache->space_info->bytes_used += num_bytes;
6226                         cache->space_info->disk_used += num_bytes * factor;
6227                         spin_unlock(&cache->lock);
6228                         spin_unlock(&cache->space_info->lock);
6229                 } else {
6230                         old_val -= num_bytes;
6231                         btrfs_set_block_group_used(&cache->item, old_val);
6232                         cache->pinned += num_bytes;
6233                         cache->space_info->bytes_pinned += num_bytes;
6234                         cache->space_info->bytes_used -= num_bytes;
6235                         cache->space_info->disk_used -= num_bytes * factor;
6236                         spin_unlock(&cache->lock);
6237                         spin_unlock(&cache->space_info->lock);
6238
6239                         trace_btrfs_space_reservation(root->fs_info, "pinned",
6240                                                       cache->space_info->flags,
6241                                                       num_bytes, 1);
6242                         set_extent_dirty(info->pinned_extents,
6243                                          bytenr, bytenr + num_bytes - 1,
6244                                          GFP_NOFS | __GFP_NOFAIL);
6245                 }
6246
6247                 spin_lock(&trans->transaction->dirty_bgs_lock);
6248                 if (list_empty(&cache->dirty_list)) {
6249                         list_add_tail(&cache->dirty_list,
6250                                       &trans->transaction->dirty_bgs);
6251                         trans->transaction->num_dirty_bgs++;
6252                         btrfs_get_block_group(cache);
6253                 }
6254                 spin_unlock(&trans->transaction->dirty_bgs_lock);
6255
6256                 /*
6257                  * No longer have used bytes in this block group, queue it for
6258                  * deletion. We do this after adding the block group to the
6259                  * dirty list to avoid races between cleaner kthread and space
6260                  * cache writeout.
6261                  */
6262                 if (!alloc && old_val == 0) {
6263                         spin_lock(&info->unused_bgs_lock);
6264                         if (list_empty(&cache->bg_list)) {
6265                                 btrfs_get_block_group(cache);
6266                                 list_add_tail(&cache->bg_list,
6267                                               &info->unused_bgs);
6268                         }
6269                         spin_unlock(&info->unused_bgs_lock);
6270                 }
6271
6272                 btrfs_put_block_group(cache);
6273                 total -= num_bytes;
6274                 bytenr += num_bytes;
6275         }
6276         return 0;
6277 }
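
/*
 * Factor example for the loop above (illustrative): allocating 1M from a
 * RAID1 block group moves 1M from ->bytes_reserved to ->bytes_used but
 * bumps ->disk_used by 2M, since RAID1 (like DUP and RAID10) keeps two
 * copies of every byte; single and RAID0 groups use factor 1.
 */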
6278
6279 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
6280 {
6281         struct btrfs_block_group_cache *cache;
6282         u64 bytenr;
6283
6284         spin_lock(&root->fs_info->block_group_cache_lock);
6285         bytenr = root->fs_info->first_logical_byte;
6286         spin_unlock(&root->fs_info->block_group_cache_lock);
6287
6288         if (bytenr < (u64)-1)
6289                 return bytenr;
6290
6291         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
6292         if (!cache)
6293                 return 0;
6294
6295         bytenr = cache->key.objectid;
6296         btrfs_put_block_group(cache);
6297
6298         return bytenr;
6299 }
6300
6301 static int pin_down_extent(struct btrfs_root *root,
6302                            struct btrfs_block_group_cache *cache,
6303                            u64 bytenr, u64 num_bytes, int reserved)
6304 {
6305         spin_lock(&cache->space_info->lock);
6306         spin_lock(&cache->lock);
6307         cache->pinned += num_bytes;
6308         cache->space_info->bytes_pinned += num_bytes;
6309         if (reserved) {
6310                 cache->reserved -= num_bytes;
6311                 cache->space_info->bytes_reserved -= num_bytes;
6312         }
6313         spin_unlock(&cache->lock);
6314         spin_unlock(&cache->space_info->lock);
6315
6316         trace_btrfs_space_reservation(root->fs_info, "pinned",
6317                                       cache->space_info->flags, num_bytes, 1);
6318         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
6319                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
6320         return 0;
6321 }
6322
6323 /*
6324  * this function must be called within transaction
6325  */
6326 int btrfs_pin_extent(struct btrfs_root *root,
6327                      u64 bytenr, u64 num_bytes, int reserved)
6328 {
6329         struct btrfs_block_group_cache *cache;
6330
6331         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
6332         BUG_ON(!cache); /* Logic error */
6333
6334         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
6335
6336         btrfs_put_block_group(cache);
6337         return 0;
6338 }
6339
6340 /*
6341  * this function must be called within transaction
6342  */
6343 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
6344                                     u64 bytenr, u64 num_bytes)
6345 {
6346         struct btrfs_block_group_cache *cache;
6347         int ret;
6348
6349         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
6350         if (!cache)
6351                 return -EINVAL;
6352
6353         /*
6354          * pull in the free space cache (if any) so that our pin
6355          * removes the free space from the cache.  We have load_only set
6356          * to one because the slow code to read in the free extents does check
6357          * the pinned extents.
6358          */
6359         cache_block_group(cache, 1);
6360
6361         pin_down_extent(root, cache, bytenr, num_bytes, 0);
6362
6363         /* remove us from the free space cache (if we're there at all) */
6364         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
6365         btrfs_put_block_group(cache);
6366         return ret;
6367 }
6368
6369 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
6370 {
6371         int ret;
6372         struct btrfs_block_group_cache *block_group;
6373         struct btrfs_caching_control *caching_ctl;
6374
6375         block_group = btrfs_lookup_block_group(root->fs_info, start);
6376         if (!block_group)
6377                 return -EINVAL;
6378
6379         cache_block_group(block_group, 0);
6380         caching_ctl = get_caching_control(block_group);
6381
6382         if (!caching_ctl) {
6383                 /* Logic error */
6384                 BUG_ON(!block_group_cache_done(block_group));
6385                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
6386         } else {
6387                 mutex_lock(&caching_ctl->mutex);
6388
6389                 if (start >= caching_ctl->progress) {
6390                         ret = add_excluded_extent(root, start, num_bytes);
6391                 } else if (start + num_bytes <= caching_ctl->progress) {
6392                         ret = btrfs_remove_free_space(block_group,
6393                                                       start, num_bytes);
6394                 } else {
6395                         num_bytes = caching_ctl->progress - start;
6396                         ret = btrfs_remove_free_space(block_group,
6397                                                       start, num_bytes);
6398                         if (ret)
6399                                 goto out_lock;
6400
6401                         num_bytes = (start + num_bytes) -
6402                                 caching_ctl->progress;
6403                         start = caching_ctl->progress;
6404                         ret = add_excluded_extent(root, start, num_bytes);
6405                 }
6406 out_lock:
6407                 mutex_unlock(&caching_ctl->mutex);
6408                 put_caching_control(caching_ctl);
6409         }
6410         btrfs_put_block_group(block_group);
6411         return ret;
6412 }
6413
6414 int btrfs_exclude_logged_extents(struct btrfs_root *log,
6415                                  struct extent_buffer *eb)
6416 {
6417         struct btrfs_file_extent_item *item;
6418         struct btrfs_key key;
6419         int found_type;
6420         int i;
6421
6422         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
6423                 return 0;
6424
6425         for (i = 0; i < btrfs_header_nritems(eb); i++) {
6426                 btrfs_item_key_to_cpu(eb, &key, i);
6427                 if (key.type != BTRFS_EXTENT_DATA_KEY)
6428                         continue;
6429                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
6430                 found_type = btrfs_file_extent_type(eb, item);
6431                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
6432                         continue;
6433                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
6434                         continue;
6435                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
6436                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
6437                 __exclude_logged_extent(log, key.objectid, key.offset);
6438         }
6439
6440         return 0;
6441 }
6442
6443 static void
6444 btrfs_inc_block_group_reservations(struct btrfs_block_group_cache *bg)
6445 {
6446         atomic_inc(&bg->reservations);
6447 }
6448
6449 void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
6450                                         const u64 start)
6451 {
6452         struct btrfs_block_group_cache *bg;
6453
6454         bg = btrfs_lookup_block_group(fs_info, start);
6455         ASSERT(bg);
6456         if (atomic_dec_and_test(&bg->reservations))
6457                 wake_up_atomic_t(&bg->reservations);
6458         btrfs_put_block_group(bg);
6459 }
6460
6461 static int btrfs_wait_bg_reservations_atomic_t(atomic_t *a)
6462 {
6463         schedule();
6464         return 0;
6465 }
6466
6467 void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
6468 {
6469         struct btrfs_space_info *space_info = bg->space_info;
6470
6471         ASSERT(bg->ro);
6472
6473         if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
6474                 return;
6475
6476         /*
6477          * Our block group is read only but before we set it to read only,
6478          * some task might have allocated an extent from it already, but it
6479          * has not yet created a respective ordered extent (and added it to a
6480          * root's list of ordered extents).
6481          * Therefore wait for any task currently allocating extents, since the
6482          * block group's reservations counter is incremented while a read lock
6483          * on the groups' semaphore is held and decremented after releasing
6484          * the read access on that semaphore and creating the ordered extent.
6485          */
6486         down_write(&space_info->groups_sem);
6487         up_write(&space_info->groups_sem);
6488
6489         wait_on_atomic_t(&bg->reservations,
6490                          btrfs_wait_bg_reservations_atomic_t,
6491                          TASK_UNINTERRUPTIBLE);
6492 }
6493
6494 /**
6495  * btrfs_add_reserved_bytes - update the block_group and space info counters
6496  * @cache:      The cache we are manipulating
6497  * @ram_bytes:  The number of bytes of file content; will be the same as
6498  *              @num_bytes except on the compression path.
6499  * @num_bytes:  The number of bytes in question
6500  * @delalloc:   The blocks are allocated for the delalloc write
6501  *
6502  * This is called by the allocator when it reserves space, so we can do the
6503  * proper ENOSPC accounting.  For data we handle the reservation through
6504  * clearing the delalloc bits in the io_tree.  We have to do this since we
6505  * could end up
6506  * allocating less disk space for the amount of data we have reserved in the
6507  * case of compression.
6508  *
6509  * If this is a reservation and the block group has become read only we cannot
6510  * make the reservation and return -EAGAIN, otherwise this function always
6511  * succeeds.
6512  */
6513 static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
6514                                     u64 ram_bytes, u64 num_bytes, int delalloc)
6515 {
6516         struct btrfs_space_info *space_info = cache->space_info;
6517         int ret = 0;
6518
6519         spin_lock(&space_info->lock);
6520         spin_lock(&cache->lock);
6521         if (cache->ro) {
6522                 ret = -EAGAIN;
6523         } else {
6524                 cache->reserved += num_bytes;
6525                 space_info->bytes_reserved += num_bytes;
6526
6527                 trace_btrfs_space_reservation(cache->fs_info,
6528                                 "space_info", space_info->flags,
6529                                 ram_bytes, 0);
6530                 space_info->bytes_may_use -= ram_bytes;
6531                 if (delalloc)
6532                         cache->delalloc_bytes += num_bytes;
6533         }
6534         spin_unlock(&cache->lock);
6535         spin_unlock(&space_info->lock);
6536         return ret;
6537 }
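
/*
 * Compression example for @ram_bytes vs @num_bytes above (illustrative):
 * if 128K of dirty file data compresses to one 16K extent, the caller
 * passes ram_bytes == 128K and num_bytes == 16K, so all 128K of
 * ->bytes_may_use from the delalloc reservation is released while only
 * 16K moves into ->bytes_reserved.
 */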
6538
6539 /**
6540  * btrfs_free_reserved_bytes - update the block_group and space info counters
6541  * @cache:      The cache we are manipulating
6542  * @num_bytes:  The number of bytes in question
6543  * @delalloc:   The blocks are allocated for the delalloc write
6544  *
6545  * This is called by somebody who is freeing space that was never actually used
6546  * on disk.  For example if you reserve some space for a new leaf in transaction
6547  * A and before transaction A commits you free that leaf, you call this to
6548  * clear the reservation.
6549  */
6551 static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
6552                                      u64 num_bytes, int delalloc)
6553 {
6554         struct btrfs_space_info *space_info = cache->space_info;
6555         int ret = 0;
6556
6557         spin_lock(&space_info->lock);
6558         spin_lock(&cache->lock);
6559         if (cache->ro)
6560                 space_info->bytes_readonly += num_bytes;
6561         cache->reserved -= num_bytes;
6562         space_info->bytes_reserved -= num_bytes;
6563
6564         if (delalloc)
6565                 cache->delalloc_bytes -= num_bytes;
6566         spin_unlock(&cache->lock);
6567         spin_unlock(&space_info->lock);
6568         return ret;
6569 }

6570 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
6571                                 struct btrfs_root *root)
6572 {
6573         struct btrfs_fs_info *fs_info = root->fs_info;
6574         struct btrfs_caching_control *next;
6575         struct btrfs_caching_control *caching_ctl;
6576         struct btrfs_block_group_cache *cache;
6577
6578         down_write(&fs_info->commit_root_sem);
6579
6580         list_for_each_entry_safe(caching_ctl, next,
6581                                  &fs_info->caching_block_groups, list) {
6582                 cache = caching_ctl->block_group;
6583                 if (block_group_cache_done(cache)) {
6584                         cache->last_byte_to_unpin = (u64)-1;
6585                         list_del_init(&caching_ctl->list);
6586                         put_caching_control(caching_ctl);
6587                 } else {
6588                         cache->last_byte_to_unpin = caching_ctl->progress;
6589                 }
6590         }
6591
6592         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6593                 fs_info->pinned_extents = &fs_info->freed_extents[1];
6594         else
6595                 fs_info->pinned_extents = &fs_info->freed_extents[0];
6596
6597         up_write(&fs_info->commit_root_sem);
6598
6599         update_global_block_rsv(fs_info);
6600 }
6601
6602 /*
6603  * Returns the free cluster for the given space info and sets empty_cluster to
6604  * what it should be based on the mount options.
6605  */
6606 static struct btrfs_free_cluster *
6607 fetch_cluster_info(struct btrfs_root *root, struct btrfs_space_info *space_info,
6608                    u64 *empty_cluster)
6609 {
6610         struct btrfs_free_cluster *ret = NULL;
6611         bool ssd = btrfs_test_opt(root->fs_info, SSD);
6612
6613         *empty_cluster = 0;
6614         if (btrfs_mixed_space_info(space_info))
6615                 return ret;
6616
6617         if (ssd)
6618                 *empty_cluster = SZ_2M;
6619         if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
6620                 ret = &root->fs_info->meta_alloc_cluster;
6621                 if (!ssd)
6622                         *empty_cluster = SZ_64K;
6623         } else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && ssd) {
6624                 ret = &root->fs_info->data_alloc_cluster;
6625         }
6626
6627         return ret;
6628 }
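
/*
 * For reference, the cluster/empty_cluster combinations the function
 * above can return:
 *
 *	metadata, ssd:		meta_alloc_cluster, 2M
 *	metadata, no ssd:	meta_alloc_cluster, 64K
 *	data, ssd:		data_alloc_cluster, 2M
 *	data, no ssd:		no cluster, 0
 *	mixed space info:	no cluster, 0
 */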
6629
6630 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
6631                               const bool return_free_space)
6632 {
6633         struct btrfs_fs_info *fs_info = root->fs_info;
6634         struct btrfs_block_group_cache *cache = NULL;
6635         struct btrfs_space_info *space_info;
6636         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
6637         struct btrfs_free_cluster *cluster = NULL;
6638         u64 len;
6639         u64 total_unpinned = 0;
6640         u64 empty_cluster = 0;
6641         bool readonly;
6642
6643         while (start <= end) {
6644                 readonly = false;
6645                 if (!cache ||
6646                     start >= cache->key.objectid + cache->key.offset) {
6647                         if (cache)
6648                                 btrfs_put_block_group(cache);
6649                         total_unpinned = 0;
6650                         cache = btrfs_lookup_block_group(fs_info, start);
6651                         BUG_ON(!cache); /* Logic error */
6652
6653                         cluster = fetch_cluster_info(root,
6654                                                      cache->space_info,
6655                                                      &empty_cluster);
6656                         empty_cluster <<= 1;
6657                 }
6658
6659                 len = cache->key.objectid + cache->key.offset - start;
6660                 len = min(len, end + 1 - start);
6661
6662                 if (start < cache->last_byte_to_unpin) {
6663                         len = min(len, cache->last_byte_to_unpin - start);
6664                         if (return_free_space)
6665                                 btrfs_add_free_space(cache, start, len);
6666                 }
6667
6668                 start += len;
6669                 total_unpinned += len;
6670                 space_info = cache->space_info;
6671
6672                 /*
6673                  * If this space cluster has been marked as fragmented and we've
6674                  * unpinned enough in this block group to potentially allow a
6675                  * cluster to be created inside of it, go ahead and clear the
6676                  * fragmented check.
6677                  */
6678                 if (cluster && cluster->fragmented &&
6679                     total_unpinned > empty_cluster) {
6680                         spin_lock(&cluster->lock);
6681                         cluster->fragmented = 0;
6682                         spin_unlock(&cluster->lock);
6683                 }
6684
6685                 spin_lock(&space_info->lock);
6686                 spin_lock(&cache->lock);
6687                 cache->pinned -= len;
6688                 space_info->bytes_pinned -= len;
6689
6690                 trace_btrfs_space_reservation(fs_info, "pinned",
6691                                               space_info->flags, len, 0);
6692                 space_info->max_extent_size = 0;
6693                 percpu_counter_add(&space_info->total_bytes_pinned, -len);
6694                 if (cache->ro) {
6695                         space_info->bytes_readonly += len;
6696                         readonly = true;
6697                 }
6698                 spin_unlock(&cache->lock);
6699                 if (!readonly && return_free_space &&
6700                     global_rsv->space_info == space_info) {
6701                         u64 to_add = len;
6702                         WARN_ON(!return_free_space);
6703                         spin_lock(&global_rsv->lock);
6704                         if (!global_rsv->full) {
6705                                 to_add = min(len, global_rsv->size -
6706                                              global_rsv->reserved);
6707                                 global_rsv->reserved += to_add;
6708                                 space_info->bytes_may_use += to_add;
6709                                 if (global_rsv->reserved >= global_rsv->size)
6710                                         global_rsv->full = 1;
6711                                 trace_btrfs_space_reservation(fs_info,
6712                                                               "space_info",
6713                                                               space_info->flags,
6714                                                               to_add, 1);
6715                                 len -= to_add;
6716                         }
6717                         spin_unlock(&global_rsv->lock);
6718                         /* Add to any tickets we may have */
6719                         if (len)
6720                                 space_info_add_new_bytes(fs_info, space_info,
6721                                                          len);
6722                 }
6723                 spin_unlock(&space_info->lock);
6724         }
6725
6726         if (cache)
6727                 btrfs_put_block_group(cache);
6728         return 0;
6729 }
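
/*
 * To recap the loop above: the pinned counters are always dropped; with
 * return_free_space == true (as btrfs_finish_extent_commit() below
 * passes) the range also goes back into the free space cache, the global
 * block reserve is topped up first if it lives in this space_info, and
 * anything left over is handed to waiting reservation tickets via
 * space_info_add_new_bytes().
 */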
6730
6731 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
6732                                struct btrfs_root *root)
6733 {
6734         struct btrfs_fs_info *fs_info = root->fs_info;
6735         struct btrfs_block_group_cache *block_group, *tmp;
6736         struct list_head *deleted_bgs;
6737         struct extent_io_tree *unpin;
6738         u64 start;
6739         u64 end;
6740         int ret;
6741
6742         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6743                 unpin = &fs_info->freed_extents[1];
6744         else
6745                 unpin = &fs_info->freed_extents[0];
6746
6747         while (!trans->aborted) {
6748                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
6749                 ret = find_first_extent_bit(unpin, 0, &start, &end,
6750                                             EXTENT_DIRTY, NULL);
6751                 if (ret) {
6752                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6753                         break;
6754                 }
6755
6756                 if (btrfs_test_opt(root->fs_info, DISCARD))
6757                         ret = btrfs_discard_extent(root, start,
6758                                                    end + 1 - start, NULL);
6759
6760                 clear_extent_dirty(unpin, start, end);
6761                 unpin_extent_range(root, start, end, true);
6762                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6763                 cond_resched();
6764         }
6765
6766         /*
6767          * Transaction is finished.  We don't need the lock anymore.  We
6768          * do need to clean up the block groups in case of a transaction
6769          * abort.
6770          */
6771         deleted_bgs = &trans->transaction->deleted_bgs;
6772         list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
6773                 u64 trimmed = 0;
6774
6775                 ret = -EROFS;
6776                 if (!trans->aborted)
6777                         ret = btrfs_discard_extent(root,
6778                                                    block_group->key.objectid,
6779                                                    block_group->key.offset,
6780                                                    &trimmed);
6781
6782                 list_del_init(&block_group->bg_list);
6783                 btrfs_put_block_group_trimming(block_group);
6784                 btrfs_put_block_group(block_group);
6785
6786                 if (ret) {
6787                         const char *errstr = btrfs_decode_error(ret);
6788                         btrfs_warn(fs_info,
6789                                    "discard failed while removing block group: errno=%d %s",
6790                                    ret, errstr);
6791                 }
6792         }
6793
6794         return 0;
6795 }
6796
6797 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
6798                              u64 owner, u64 root_objectid)
6799 {
6800         struct btrfs_space_info *space_info;
6801         u64 flags;
6802
6803         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6804                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
6805                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
6806                 else
6807                         flags = BTRFS_BLOCK_GROUP_METADATA;
6808         } else {
6809                 flags = BTRFS_BLOCK_GROUP_DATA;
6810         }
6811
6812         space_info = __find_space_info(fs_info, flags);
6813         BUG_ON(!space_info); /* Logic bug */
6814         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
6815 }
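
/*
 * The owner test above distinguishes tree blocks (owner is the block
 * level, below BTRFS_FIRST_FREE_OBJECTID) from file data.  E.g. the call
 * in btrfs_free_extent() below,
 *
 *	add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
 *
 * charges METADATA (or SYSTEM for the chunk tree) for tree blocks and
 * DATA for everything else.
 */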
6816
6818 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
6819                                 struct btrfs_root *root,
6820                                 struct btrfs_delayed_ref_node *node, u64 parent,
6821                                 u64 root_objectid, u64 owner_objectid,
6822                                 u64 owner_offset, int refs_to_drop,
6823                                 struct btrfs_delayed_extent_op *extent_op)
6824 {
6825         struct btrfs_key key;
6826         struct btrfs_path *path;
6827         struct btrfs_fs_info *info = root->fs_info;
6828         struct btrfs_root *extent_root = info->extent_root;
6829         struct extent_buffer *leaf;
6830         struct btrfs_extent_item *ei;
6831         struct btrfs_extent_inline_ref *iref;
6832         int ret;
6833         int is_data;
6834         int extent_slot = 0;
6835         int found_extent = 0;
6836         int num_to_del = 1;
6837         u32 item_size;
6838         u64 refs;
6839         u64 bytenr = node->bytenr;
6840         u64 num_bytes = node->num_bytes;
6841         int last_ref = 0;
6842         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6843                                                  SKINNY_METADATA);
6844
6845         path = btrfs_alloc_path();
6846         if (!path)
6847                 return -ENOMEM;
6848
6849         path->reada = READA_FORWARD;
6850         path->leave_spinning = 1;
6851
6852         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
6853         BUG_ON(!is_data && refs_to_drop != 1);
6854
6855         if (is_data)
6856                 skinny_metadata = 0;
6857
6858         ret = lookup_extent_backref(trans, extent_root, path, &iref,
6859                                     bytenr, num_bytes, parent,
6860                                     root_objectid, owner_objectid,
6861                                     owner_offset);
6862         if (ret == 0) {
6863                 extent_slot = path->slots[0];
6864                 while (extent_slot >= 0) {
6865                         btrfs_item_key_to_cpu(path->nodes[0], &key,
6866                                               extent_slot);
6867                         if (key.objectid != bytenr)
6868                                 break;
6869                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
6870                             key.offset == num_bytes) {
6871                                 found_extent = 1;
6872                                 break;
6873                         }
6874                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
6875                             key.offset == owner_objectid) {
6876                                 found_extent = 1;
6877                                 break;
6878                         }
6879                         if (path->slots[0] - extent_slot > 5)
6880                                 break;
6881                         extent_slot--;
6882                 }
6883 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6884                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
6885                 if (found_extent && item_size < sizeof(*ei))
6886                         found_extent = 0;
6887 #endif
6888                 if (!found_extent) {
6889                         BUG_ON(iref);
6890                         ret = remove_extent_backref(trans, extent_root, path,
6891                                                     NULL, refs_to_drop,
6892                                                     is_data, &last_ref);
6893                         if (ret) {
6894                                 btrfs_abort_transaction(trans, ret);
6895                                 goto out;
6896                         }
6897                         btrfs_release_path(path);
6898                         path->leave_spinning = 1;
6899
6900                         key.objectid = bytenr;
6901                         key.type = BTRFS_EXTENT_ITEM_KEY;
6902                         key.offset = num_bytes;
6903
6904                         if (!is_data && skinny_metadata) {
6905                                 key.type = BTRFS_METADATA_ITEM_KEY;
6906                                 key.offset = owner_objectid;
6907                         }
6908
6909                         ret = btrfs_search_slot(trans, extent_root,
6910                                                 &key, path, -1, 1);
6911                         if (ret > 0 && skinny_metadata && path->slots[0]) {
6912                                 /*
6913                                  * Couldn't find our skinny metadata item,
6914                                  * see if we have ye olde extent item.
6915                                  */
6916                                 path->slots[0]--;
6917                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
6918                                                       path->slots[0]);
6919                                 if (key.objectid == bytenr &&
6920                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
6921                                     key.offset == num_bytes)
6922                                         ret = 0;
6923                         }
6924
6925                         if (ret > 0 && skinny_metadata) {
6926                                 skinny_metadata = false;
6927                                 key.objectid = bytenr;
6928                                 key.type = BTRFS_EXTENT_ITEM_KEY;
6929                                 key.offset = num_bytes;
6930                                 btrfs_release_path(path);
6931                                 ret = btrfs_search_slot(trans, extent_root,
6932                                                         &key, path, -1, 1);
6933                         }
6934
6935                         if (ret) {
6936                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6937                                         ret, bytenr);
6938                                 if (ret > 0)
6939                                         btrfs_print_leaf(extent_root,
6940                                                          path->nodes[0]);
6941                         }
6942                         if (ret < 0) {
6943                                 btrfs_abort_transaction(trans, ret);
6944                                 goto out;
6945                         }
6946                         extent_slot = path->slots[0];
6947                 }
6948         } else if (WARN_ON(ret == -ENOENT)) {
6949                 btrfs_print_leaf(extent_root, path->nodes[0]);
6950                 btrfs_err(info,
6951                         "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
6952                         bytenr, parent, root_objectid, owner_objectid,
6953                         owner_offset);
6954                 btrfs_abort_transaction(trans, ret);
6955                 goto out;
6956         } else {
6957                 btrfs_abort_transaction(trans, ret);
6958                 goto out;
6959         }
6960
6961         leaf = path->nodes[0];
6962         item_size = btrfs_item_size_nr(leaf, extent_slot);
6963 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6964         if (item_size < sizeof(*ei)) {
6965                 BUG_ON(found_extent || extent_slot != path->slots[0]);
6966                 ret = convert_extent_item_v0(trans, extent_root, path,
6967                                              owner_objectid, 0);
6968                 if (ret < 0) {
6969                         btrfs_abort_transaction(trans, ret);
6970                         goto out;
6971                 }
6972
6973                 btrfs_release_path(path);
6974                 path->leave_spinning = 1;
6975
6976                 key.objectid = bytenr;
6977                 key.type = BTRFS_EXTENT_ITEM_KEY;
6978                 key.offset = num_bytes;
6979
6980                 ret = btrfs_search_slot(trans, extent_root, &key, path,
6981                                         -1, 1);
6982                 if (ret) {
6983                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6984                                 ret, bytenr);
6985                         btrfs_print_leaf(extent_root, path->nodes[0]);
6986                 }
6987                 if (ret < 0) {
6988                         btrfs_abort_transaction(trans, ret);
6989                         goto out;
6990                 }
6991
6992                 extent_slot = path->slots[0];
6993                 leaf = path->nodes[0];
6994                 item_size = btrfs_item_size_nr(leaf, extent_slot);
6995         }
6996 #endif
6997         BUG_ON(item_size < sizeof(*ei));
6998         ei = btrfs_item_ptr(leaf, extent_slot,
6999                             struct btrfs_extent_item);
7000         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
7001             key.type == BTRFS_EXTENT_ITEM_KEY) {
7002                 struct btrfs_tree_block_info *bi;
7003                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
7004                 bi = (struct btrfs_tree_block_info *)(ei + 1);
7005                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
7006         }
7007
7008         refs = btrfs_extent_refs(leaf, ei);
7009         if (refs < refs_to_drop) {
7010                 btrfs_err(info, "trying to drop %d refs but we only have %llu for bytenr %llu",
7011                           refs_to_drop, refs, bytenr);
7012                 ret = -EINVAL;
7013                 btrfs_abort_transaction(trans, ret);
7014                 goto out;
7015         }
7016         refs -= refs_to_drop;
7017
7018         if (refs > 0) {
7019                 if (extent_op)
7020                         __run_delayed_extent_op(extent_op, leaf, ei);
7021                 /*
7022                  * In the case of inline back ref, reference count will
7023                  * be updated by remove_extent_backref
7024                  */
7025                 if (iref) {
7026                         BUG_ON(!found_extent);
7027                 } else {
7028                         btrfs_set_extent_refs(leaf, ei, refs);
7029                         btrfs_mark_buffer_dirty(leaf);
7030                 }
7031                 if (found_extent) {
7032                         ret = remove_extent_backref(trans, extent_root, path,
7033                                                     iref, refs_to_drop,
7034                                                     is_data, &last_ref);
7035                         if (ret) {
7036                                 btrfs_abort_transaction(trans, ret);
7037                                 goto out;
7038                         }
7039                 }
7040                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
7041                                  root_objectid);
7042         } else {
7043                 if (found_extent) {
7044                         BUG_ON(is_data && refs_to_drop !=
7045                                extent_data_ref_count(path, iref));
7046                         if (iref) {
7047                                 BUG_ON(path->slots[0] != extent_slot);
7048                         } else {
7049                                 BUG_ON(path->slots[0] != extent_slot + 1);
7050                                 path->slots[0] = extent_slot;
7051                                 num_to_del = 2;
7052                         }
7053                 }
7054
7055                 last_ref = 1;
7056                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
7057                                       num_to_del);
7058                 if (ret) {
7059                         btrfs_abort_transaction(trans, ret);
7060                         goto out;
7061                 }
7062                 btrfs_release_path(path);
7063
7064                 if (is_data) {
7065                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
7066                         if (ret) {
7067                                 btrfs_abort_transaction(trans, ret);
7068                                 goto out;
7069                         }
7070                 }
7071
7072                 ret = add_to_free_space_tree(trans, root->fs_info, bytenr,
7073                                              num_bytes);
7074                 if (ret) {
7075                         btrfs_abort_transaction(trans, ret);
7076                         goto out;
7077                 }
7078
7079                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
7080                 if (ret) {
7081                         btrfs_abort_transaction(trans, ret);
7082                         goto out;
7083                 }
7084         }
7085         btrfs_release_path(path);
7086
7087 out:
7088         btrfs_free_path(path);
7089         return ret;
7090 }
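
/*
 * Summary of the two outcomes in __btrfs_free_extent(): if references
 * remain (refs > 0) only the ref count and the matching backref are
 * updated; if this was the last reference, the extent item itself is
 * deleted, csums are removed for data extents, the free space tree is
 * updated and update_block_group() releases the bytes back to the block
 * group.
 */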
7091
7092 /*
7093  * when we free a block, it is possible (and likely) that we free the last
7094  * delayed ref for that extent as well.  This searches the delayed ref tree for
7095  * a given extent, and if there are no other delayed refs to be processed, it
7096  * removes it from the tree.
7097  */
7098 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
7099                                       struct btrfs_root *root, u64 bytenr)
7100 {
7101         struct btrfs_delayed_ref_head *head;
7102         struct btrfs_delayed_ref_root *delayed_refs;
7103         int ret = 0;
7104
7105         delayed_refs = &trans->transaction->delayed_refs;
7106         spin_lock(&delayed_refs->lock);
7107         head = btrfs_find_delayed_ref_head(trans, bytenr);
7108         if (!head)
7109                 goto out_delayed_unlock;
7110
7111         spin_lock(&head->lock);
7112         if (!list_empty(&head->ref_list))
7113                 goto out;
7114
7115         if (head->extent_op) {
7116                 if (!head->must_insert_reserved)
7117                         goto out;
7118                 btrfs_free_delayed_extent_op(head->extent_op);
7119                 head->extent_op = NULL;
7120         }
7121
7122         /*
7123          * waiting for the lock here would deadlock.  If someone else has it
7124          * locked, they are already in the process of dropping it anyway
7125          */
7126         if (!mutex_trylock(&head->mutex))
7127                 goto out;
7128
7129         /*
7130          * at this point we have a head with no other entries.  Go
7131          * ahead and process it.
7132          */
7133         head->node.in_tree = 0;
7134         rb_erase(&head->href_node, &delayed_refs->href_root);
7135
7136         atomic_dec(&delayed_refs->num_entries);
7137
7138         /*
7139          * we don't take a ref on the node because we're removing it from the
7140          * tree, so we just steal the ref the tree was holding.
7141          */
7142         delayed_refs->num_heads--;
7143         if (head->processing == 0)
7144                 delayed_refs->num_heads_ready--;
7145         head->processing = 0;
7146         spin_unlock(&head->lock);
7147         spin_unlock(&delayed_refs->lock);
7148
7149         BUG_ON(head->extent_op);
7150         if (head->must_insert_reserved)
7151                 ret = 1;
7152
7153         mutex_unlock(&head->mutex);
7154         btrfs_put_delayed_ref(&head->node);
7155         return ret;
7156 out:
7157         spin_unlock(&head->lock);
7158
7159 out_delayed_unlock:
7160         spin_unlock(&delayed_refs->lock);
7161         return 0;
7162 }
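
/*
 * Return convention, roughly: 1 means the delayed ref head was removed
 * here and must_insert_reserved was set, i.e. the extent never made it
 * into the extent tree, so the caller can release the space directly;
 * 0 means other refs exist (or the head was busy) and normal delayed ref
 * processing will deal with the extent.
 */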
7163
7164 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
7165                            struct btrfs_root *root,
7166                            struct extent_buffer *buf,
7167                            u64 parent, int last_ref)
7168 {
7169         int pin = 1;
7170         int ret;
7171
7172         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
7173                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7174                                         buf->start, buf->len,
7175                                         parent, root->root_key.objectid,
7176                                         btrfs_header_level(buf),
7177                                         BTRFS_DROP_DELAYED_REF, NULL);
7178                 BUG_ON(ret); /* -ENOMEM */
7179         }
7180
7181         if (!last_ref)
7182                 return;
7183
7184         if (btrfs_header_generation(buf) == trans->transid) {
7185                 struct btrfs_block_group_cache *cache;
7186
7187                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
7188                         ret = check_ref_cleanup(trans, root, buf->start);
7189                         if (!ret)
7190                                 goto out;
7191                 }
7192
7193                 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
7194
7195                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
7196                         pin_down_extent(root, cache, buf->start, buf->len, 1);
7197                         btrfs_put_block_group(cache);
7198                         goto out;
7199                 }
7200
7201                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
7202
7203                 btrfs_add_free_space(cache, buf->start, buf->len);
7204                 btrfs_free_reserved_bytes(cache, buf->len, 0);
7205                 btrfs_put_block_group(cache);
7206                 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
7207                 pin = 0;
7208         }
7209 out:
7210         if (pin)
7211                 add_pinned_bytes(root->fs_info, buf->len,
7212                                  btrfs_header_level(buf),
7213                                  root->root_key.objectid);
7214
7215         /*
7216          * Deleting the buffer, clear the corrupt flag since it doesn't matter
7217          * anymore.
7218          */
7219         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
7220 }
7221
7222 /* Can return -ENOMEM */
7223 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
7224                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
7225                       u64 owner, u64 offset)
7226 {
7227         int ret;
7228         struct btrfs_fs_info *fs_info = root->fs_info;
7229
7230         if (btrfs_is_testing(fs_info))
7231                 return 0;
7232
7233         add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
7234
7235         /*
7236          * tree log blocks never actually go into the extent allocation
7237          * tree, just update pinning info and exit early.
7238          */
7239         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
7240                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
7241                 /* unlocks the pinned mutex */
7242                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
7243                 ret = 0;
7244         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
7245                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
7246                                         num_bytes,
7247                                         parent, root_objectid, (int)owner,
7248                                         BTRFS_DROP_DELAYED_REF, NULL);
7249         } else {
7250                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
7251                                                 num_bytes,
7252                                                 parent, root_objectid, owner,
7253                                                 offset, 0,
7254                                                 BTRFS_DROP_DELAYED_REF, NULL);
7255         }
7256         return ret;
7257 }
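
/*
 * A sketch of the common data case (inode_objectid and file_offset are
 * placeholders, values illustrative only): dropping one reference to a
 * file extent queues a BTRFS_DROP_DELAYED_REF rather than touching the
 * extent tree synchronously:
 *
 *	ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
 *				0, root->root_key.objectid,
 *				inode_objectid, file_offset);
 *
 * owner >= BTRFS_FIRST_FREE_OBJECTID selects the data path above.
 */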
7258
7259 /*
7260  * when we wait for progress in the block group caching, it's because
7261  * our allocation attempt failed at least once.  So, we must sleep
7262  * and let some progress happen before we try again.
7263  *
7264  * This function will sleep at least once waiting for new free space to
7265  * show up, and then it will check the block group free space numbers
7266  * for our min num_bytes.  Another option is to have it go ahead
7267  * and look in the rbtree for a free extent of a given size, but this
7268  * is a good start.
7269  *
7270  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
7271  * any of the information in this block group.
7272  */
7273 static noinline void
7274 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
7275                                 u64 num_bytes)
7276 {
7277         struct btrfs_caching_control *caching_ctl;
7278
7279         caching_ctl = get_caching_control(cache);
7280         if (!caching_ctl)
7281                 return;
7282
7283         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
7284                    (cache->free_space_ctl->free_space >= num_bytes));
7285
7286         put_caching_control(caching_ctl);
7287 }
7288
7289 static noinline int
7290 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
7291 {
7292         struct btrfs_caching_control *caching_ctl;
7293         int ret = 0;
7294
7295         caching_ctl = get_caching_control(cache);
7296         if (!caching_ctl)
7297                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
7298
7299         wait_event(caching_ctl->wait, block_group_cache_done(cache));
7300         if (cache->cached == BTRFS_CACHE_ERROR)
7301                 ret = -EIO;
7302         put_caching_control(caching_ctl);
7303         return ret;
7304 }
7305
7306 int __get_raid_index(u64 flags)
7307 {
7308         if (flags & BTRFS_BLOCK_GROUP_RAID10)
7309                 return BTRFS_RAID_RAID10;
7310         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
7311                 return BTRFS_RAID_RAID1;
7312         else if (flags & BTRFS_BLOCK_GROUP_DUP)
7313                 return BTRFS_RAID_DUP;
7314         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
7315                 return BTRFS_RAID_RAID0;
7316         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
7317                 return BTRFS_RAID_RAID5;
7318         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
7319                 return BTRFS_RAID_RAID6;
7320
7321         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
7322 }
7323
7324 int get_block_group_index(struct btrfs_block_group_cache *cache)
7325 {
7326         return __get_raid_index(cache->flags);
7327 }
7328
7329 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
7330         [BTRFS_RAID_RAID10]     = "raid10",
7331         [BTRFS_RAID_RAID1]      = "raid1",
7332         [BTRFS_RAID_DUP]        = "dup",
7333         [BTRFS_RAID_RAID0]      = "raid0",
7334         [BTRFS_RAID_SINGLE]     = "single",
7335         [BTRFS_RAID_RAID5]      = "raid5",
7336         [BTRFS_RAID_RAID6]      = "raid6",
7337 };
7338
7339 static const char *get_raid_name(enum btrfs_raid_types type)
7340 {
7341         if (type >= BTRFS_NR_RAID_TYPES)
7342                 return NULL;
7343
7344         return btrfs_raid_type_names[type];
7345 }
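
/*
 * E.g. get_raid_name(BTRFS_RAID_RAID10) yields "raid10"; anything
 * >= BTRFS_NR_RAID_TYPES yields NULL, so callers must check the result
 * before printing it.
 */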
7346
7347 enum btrfs_loop_type {
7348         LOOP_CACHING_NOWAIT = 0,
7349         LOOP_CACHING_WAIT = 1,
7350         LOOP_ALLOC_CHUNK = 2,
7351         LOOP_NO_EMPTY_SIZE = 3,
7352 };
7353
7354 static inline void
7355 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
7356                        int delalloc)
7357 {
7358         if (delalloc)
7359                 down_read(&cache->data_rwsem);
7360 }
7361
7362 static inline void
7363 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
7364                        int delalloc)
7365 {
7366         btrfs_get_block_group(cache);
7367         if (delalloc)
7368                 down_read(&cache->data_rwsem);
7369 }
7370
7371 static struct btrfs_block_group_cache *
7372 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
7373                    struct btrfs_free_cluster *cluster,
7374                    int delalloc)
7375 {
7376         struct btrfs_block_group_cache *used_bg = NULL;
7377
7378         spin_lock(&cluster->refill_lock);
7379         while (1) {
7380                 used_bg = cluster->block_group;
7381                 if (!used_bg)
7382                         return NULL;
7383
7384                 if (used_bg == block_group)
7385                         return used_bg;
7386
7387                 btrfs_get_block_group(used_bg);
7388
7389                 if (!delalloc)
7390                         return used_bg;
7391
7392                 if (down_read_trylock(&used_bg->data_rwsem))
7393                         return used_bg;
7394
7395                 spin_unlock(&cluster->refill_lock);
7396
7397                 down_read(&used_bg->data_rwsem);
7398
7399                 spin_lock(&cluster->refill_lock);
7400                 if (used_bg == cluster->block_group)
7401                         return used_bg;
7402
7403                 up_read(&used_bg->data_rwsem);
7404                 btrfs_put_block_group(used_bg);
7405         }
7406 }
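
/*
 * The retry loop above exists because, for delalloc allocations, we must
 * hold used_bg->data_rwsem on return but may not block on it while
 * holding cluster->refill_lock.  So on contention we drop the refill
 * lock, take the rwsem, retake the refill lock, and only succeed if the
 * cluster still points at the same block group; otherwise we undo and go
 * around again.
 */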
7407
7408 static inline void
7409 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
7410                          int delalloc)
7411 {
7412         if (delalloc)
7413                 up_read(&cache->data_rwsem);
7414         btrfs_put_block_group(cache);
7415 }
7416
7417 /*
7418  * walks the btree of allocated extents and finds a hole of a given size.
7419  * The key ins is changed to record the hole:
7420  * ins->objectid == start position
7421  * ins->flags == BTRFS_EXTENT_ITEM_KEY
7422  * ins->offset == the size of the hole.
7423  * Any available blocks before search_start are skipped.
7424  *
7425  * If there is no suitable free space, we will record the max size of
7426  * the largest free space extent we saw during the search.
7427  */
7428 static noinline int find_free_extent(struct btrfs_root *orig_root,
7429                                 u64 ram_bytes, u64 num_bytes, u64 empty_size,
7430                                 u64 hint_byte, struct btrfs_key *ins,
7431                                 u64 flags, int delalloc)
7432 {
7433         int ret = 0;
7434         struct btrfs_root *root = orig_root->fs_info->extent_root;
7435         struct btrfs_free_cluster *last_ptr = NULL;
7436         struct btrfs_block_group_cache *block_group = NULL;
7437         u64 search_start = 0;
7438         u64 max_extent_size = 0;
7439         u64 empty_cluster = 0;
7440         struct btrfs_space_info *space_info;
7441         int loop = 0;
7442         int index = __get_raid_index(flags);
7443         bool failed_cluster_refill = false;
7444         bool failed_alloc = false;
7445         bool use_cluster = true;
7446         bool have_caching_bg = false;
7447         bool orig_have_caching_bg = false;
7448         bool full_search = false;
7449
7450         WARN_ON(num_bytes < root->sectorsize);
7451         ins->type = BTRFS_EXTENT_ITEM_KEY;
7452         ins->objectid = 0;
7453         ins->offset = 0;
7454
7455         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
7456
7457         space_info = __find_space_info(root->fs_info, flags);
7458         if (!space_info) {
7459                 btrfs_err(root->fs_info, "No space info for %llu", flags);
7460                 return -ENOSPC;
7461         }
7462
7463         /*
7464          * If our free space is heavily fragmented we may not be able to make
7465          * big contiguous allocations, so instead of doing the expensive search
7466          * for free space, simply return ENOSPC with our max_extent_size so we
7467          * can go ahead and search for a more manageable chunk.
7468          *
7469          * If our max_extent_size is large enough for our allocation simply
7470          * disable clustering since we will likely not be able to find enough
7471          * space to create a cluster and induce latency trying.
7472          */
7473         if (unlikely(space_info->max_extent_size)) {
7474                 spin_lock(&space_info->lock);
7475                 if (space_info->max_extent_size &&
7476                     num_bytes > space_info->max_extent_size) {
7477                         ins->offset = space_info->max_extent_size;
7478                         spin_unlock(&space_info->lock);
7479                         return -ENOSPC;
7480                 } else if (space_info->max_extent_size) {
7481                         use_cluster = false;
7482                 }
7483                 spin_unlock(&space_info->lock);
7484         }
7485
7486         last_ptr = fetch_cluster_info(orig_root, space_info, &empty_cluster);
7487         if (last_ptr) {
7488                 spin_lock(&last_ptr->lock);
7489                 if (last_ptr->block_group)
7490                         hint_byte = last_ptr->window_start;
7491                 if (last_ptr->fragmented) {
7492                         /*
7493                          * We still set window_start so we can keep track of the
7494                          * last place we found an allocation to try and save
7495                          * some time.
7496                          */
7497                         hint_byte = last_ptr->window_start;
7498                         use_cluster = false;
7499                 }
7500                 spin_unlock(&last_ptr->lock);
7501         }
7502
7503         search_start = max(search_start, first_logical_byte(root, 0));
7504         search_start = max(search_start, hint_byte);
7505         if (search_start == hint_byte) {
7506                 block_group = btrfs_lookup_block_group(root->fs_info,
7507                                                        search_start);
7508                 /*
7509                  * we don't want to use the block group if it doesn't match our
7510                  * allocation bits, or if its not cached.
7511                  *
7512                  * However if we are re-searching with an ideal block group
7513                  * picked out then we don't care that the block group is cached.
7514                  */
7515                 if (block_group && block_group_bits(block_group, flags) &&
7516                     block_group->cached != BTRFS_CACHE_NO) {
7517                         down_read(&space_info->groups_sem);
7518                         if (list_empty(&block_group->list) ||
7519                             block_group->ro) {
7520                                 /*
7521                                  * someone is removing this block group,
7522                                  * we can't jump into the have_block_group
7523                                  * target because our list pointers are not
7524                                  * valid
7525                                  */
7526                                 btrfs_put_block_group(block_group);
7527                                 up_read(&space_info->groups_sem);
7528                         } else {
7529                                 index = get_block_group_index(block_group);
7530                                 btrfs_lock_block_group(block_group, delalloc);
7531                                 goto have_block_group;
7532                         }
7533                 } else if (block_group) {
7534                         btrfs_put_block_group(block_group);
7535                 }
7536         }
7537 search:
7538         have_caching_bg = false;
7539         if (index == 0 || index == __get_raid_index(flags))
7540                 full_search = true;
7541         down_read(&space_info->groups_sem);
7542         list_for_each_entry(block_group, &space_info->block_groups[index],
7543                             list) {
7544                 u64 offset;
7545                 int cached;
7546
7547                 btrfs_grab_block_group(block_group, delalloc);
7548                 search_start = block_group->key.objectid;
7549
7550                 /*
7551                  * this can happen if we end up cycling through all the
7552                  * raid types, but we want to make sure we only allocate
7553                  * for the proper type.
7554                  */
7555                 if (!block_group_bits(block_group, flags)) {
7556                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
7557                                     BTRFS_BLOCK_GROUP_RAID1 |
7558                                     BTRFS_BLOCK_GROUP_RAID5 |
7559                                     BTRFS_BLOCK_GROUP_RAID6 |
7560                                     BTRFS_BLOCK_GROUP_RAID10;
7561
7562                         /*
7563                          * if they asked for extra copies and this block group
7564                          * doesn't provide them, bail.  This does allow us to
7565                          * fill raid0 from raid1.
7566                          */
7567                         if ((flags & extra) && !(block_group->flags & extra))
7568                                 goto loop;
7569                 }
7570
7571 have_block_group:
7572                 cached = block_group_cache_done(block_group);
7573                 if (unlikely(!cached)) {
7574                         have_caching_bg = true;
7575                         ret = cache_block_group(block_group, 0);
7576                         BUG_ON(ret < 0);
7577                         ret = 0;
7578                 }
7579
7580                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
7581                         goto loop;
7582                 if (unlikely(block_group->ro))
7583                         goto loop;
7584
7585                 /*
7586                  * Ok, we want to try and use the cluster allocator, so
7587                  * let's look there.
7588                  */
7589                 if (last_ptr && use_cluster) {
7590                         struct btrfs_block_group_cache *used_block_group;
7591                         unsigned long aligned_cluster;
7592                         /*
7593                          * the refill lock keeps out other
7594                          * people trying to start a new cluster
7595                          */
7596                         used_block_group = btrfs_lock_cluster(block_group,
7597                                                               last_ptr,
7598                                                               delalloc);
7599                         if (!used_block_group)
7600                                 goto refill_cluster;
7601
7602                         if (used_block_group != block_group &&
7603                             (used_block_group->ro ||
7604                              !block_group_bits(used_block_group, flags)))
7605                                 goto release_cluster;
7606
7607                         offset = btrfs_alloc_from_cluster(used_block_group,
7608                                                 last_ptr,
7609                                                 num_bytes,
7610                                                 used_block_group->key.objectid,
7611                                                 &max_extent_size);
7612                         if (offset) {
7613                                 /* we have a block, we're done */
7614                                 spin_unlock(&last_ptr->refill_lock);
7615                                 trace_btrfs_reserve_extent_cluster(root,
7616                                                 used_block_group,
7617                                                 search_start, num_bytes);
7618                                 if (used_block_group != block_group) {
7619                                         btrfs_release_block_group(block_group,
7620                                                                   delalloc);
7621                                         block_group = used_block_group;
7622                                 }
7623                                 goto checks;
7624                         }
7625
7626                         WARN_ON(last_ptr->block_group != used_block_group);
7627 release_cluster:
7628                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
7629                          * set up a new cluster, so let's just skip it
7630                          * and let the allocator find whatever block
7631                          * it can find.  If we reach this point, we
7632                          * will have tried the cluster allocator
7633                          * plenty of times and not have found
7634                          * anything, so we are likely way too
7635                          * fragmented for the clustering stuff to find
7636                          * anything.
7637                          *
7638                          * However, if the cluster is taken from the
7639                          * current block group, release the cluster
7640                          * first, so that we stand a better chance of
7641                          * succeeding in the unclustered
7642                          * allocation.  */
7643                         if (loop >= LOOP_NO_EMPTY_SIZE &&
7644                             used_block_group != block_group) {
7645                                 spin_unlock(&last_ptr->refill_lock);
7646                                 btrfs_release_block_group(used_block_group,
7647                                                           delalloc);
7648                                 goto unclustered_alloc;
7649                         }
7650
7651                         /*
7652                          * this cluster didn't work out, free it and
7653                          * start over
7654                          */
7655                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7656
7657                         if (used_block_group != block_group)
7658                                 btrfs_release_block_group(used_block_group,
7659                                                           delalloc);
7660 refill_cluster:
7661                         if (loop >= LOOP_NO_EMPTY_SIZE) {
7662                                 spin_unlock(&last_ptr->refill_lock);
7663                                 goto unclustered_alloc;
7664                         }
7665
7666                         aligned_cluster = max_t(unsigned long,
7667                                                 empty_cluster + empty_size,
7668                                               block_group->full_stripe_len);
7669
7670                         /* allocate a cluster in this block group */
7671                         ret = btrfs_find_space_cluster(root, block_group,
7672                                                        last_ptr, search_start,
7673                                                        num_bytes,
7674                                                        aligned_cluster);
7675                         if (ret == 0) {
7676                                 /*
7677                                  * now pull our allocation out of this
7678                                  * cluster
7679                                  */
7680                                 offset = btrfs_alloc_from_cluster(block_group,
7681                                                         last_ptr,
7682                                                         num_bytes,
7683                                                         search_start,
7684                                                         &max_extent_size);
7685                                 if (offset) {
7686                                         /* we found one, proceed */
7687                                         spin_unlock(&last_ptr->refill_lock);
7688                                         trace_btrfs_reserve_extent_cluster(root,
7689                                                 block_group, search_start,
7690                                                 num_bytes);
7691                                         goto checks;
7692                                 }
7693                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
7694                                    && !failed_cluster_refill) {
7695                                 spin_unlock(&last_ptr->refill_lock);
7696
7697                                 failed_cluster_refill = true;
7698                                 wait_block_group_cache_progress(block_group,
7699                                        num_bytes + empty_cluster + empty_size);
7700                                 goto have_block_group;
7701                         }
7702
7703                         /*
7704                          * at this point we either didn't find a cluster
7705                          * or we weren't able to allocate a block from our
7706                          * cluster.  Free the cluster we've been trying
7707                          * to use, and go to the next block group
7708                          */
7709                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7710                         spin_unlock(&last_ptr->refill_lock);
7711                         goto loop;
7712                 }
7713
7714 unclustered_alloc:
7715                 /*
7716                  * We are doing an unclustered alloc, set the fragmented flag so
7717                  * we don't bother trying to set up a cluster again until we get
7718                  * more space.
7719                  */
7720                 if (unlikely(last_ptr)) {
7721                         spin_lock(&last_ptr->lock);
7722                         last_ptr->fragmented = 1;
7723                         spin_unlock(&last_ptr->lock);
7724                 }
7725                 spin_lock(&block_group->free_space_ctl->tree_lock);
7726                 if (cached &&
7727                     block_group->free_space_ctl->free_space <
7728                     num_bytes + empty_cluster + empty_size) {
7729                         if (block_group->free_space_ctl->free_space >
7730                             max_extent_size)
7731                                 max_extent_size =
7732                                         block_group->free_space_ctl->free_space;
7733                         spin_unlock(&block_group->free_space_ctl->tree_lock);
7734                         goto loop;
7735                 }
7736                 spin_unlock(&block_group->free_space_ctl->tree_lock);
7737
7738                 offset = btrfs_find_space_for_alloc(block_group, search_start,
7739                                                     num_bytes, empty_size,
7740                                                     &max_extent_size);
7741                 /*
7742                  * If we didn't find a chunk, and we haven't failed on this
7743                  * block group before, and this block group is in the middle of
7744                  * caching and we are ok with waiting, then go ahead and wait
7745                  * for progress to be made, and set failed_alloc to true.
7746                  *
7747                  * If failed_alloc is true then we've already waited on this
7748                  * block group once and should move on to the next block group.
7749                  */
7750                 if (!offset && !failed_alloc && !cached &&
7751                     loop > LOOP_CACHING_NOWAIT) {
7752                         wait_block_group_cache_progress(block_group,
7753                                                 num_bytes + empty_size);
7754                         failed_alloc = true;
7755                         goto have_block_group;
7756                 } else if (!offset) {
7757                         goto loop;
7758                 }
7759 checks:
7760                 search_start = ALIGN(offset, root->stripesize);
7761
7762                 /* move on to the next group */
7763                 if (search_start + num_bytes >
7764                     block_group->key.objectid + block_group->key.offset) {
7765                         btrfs_add_free_space(block_group, offset, num_bytes);
7766                         goto loop;
7767                 }
7768
7769                 if (offset < search_start)
7770                         btrfs_add_free_space(block_group, offset,
7771                                              search_start - offset);
7772                 BUG_ON(offset > search_start);
7773
7774                 ret = btrfs_add_reserved_bytes(block_group, ram_bytes,
7775                                 num_bytes, delalloc);
7776                 if (ret == -EAGAIN) {
7777                         btrfs_add_free_space(block_group, offset, num_bytes);
7778                         goto loop;
7779                 }
7780                 btrfs_inc_block_group_reservations(block_group);
7781
7782                 /* we are all good, let's return */
7783                 ins->objectid = search_start;
7784                 ins->offset = num_bytes;
7785
7786                 trace_btrfs_reserve_extent(orig_root, block_group,
7787                                            search_start, num_bytes);
7788                 btrfs_release_block_group(block_group, delalloc);
7789                 break;
7790 loop:
7791                 failed_cluster_refill = false;
7792                 failed_alloc = false;
7793                 BUG_ON(index != get_block_group_index(block_group));
7794                 btrfs_release_block_group(block_group, delalloc);
7795         }
7796         up_read(&space_info->groups_sem);
7797
7798         if ((loop == LOOP_CACHING_NOWAIT) && have_caching_bg &&
7799             !orig_have_caching_bg)
7800                 orig_have_caching_bg = true;
7801
7802         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
7803                 goto search;
7804
7805         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
7806                 goto search;
7807
7808         /*
7809          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
7810          *                      caching kthreads as we move along
7811          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
7812          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
7813          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
7814          *                      again
7815          */
7816         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
7817                 index = 0;
7818                 if (loop == LOOP_CACHING_NOWAIT) {
7819                         /*
7820                          * We want to skip the LOOP_CACHING_WAIT step if we
7821                          * don't have any uncached bgs and we've already done a
7822                          * full search through.
7823                          */
7824                         if (orig_have_caching_bg || !full_search)
7825                                 loop = LOOP_CACHING_WAIT;
7826                         else
7827                                 loop = LOOP_ALLOC_CHUNK;
7828                 } else {
7829                         loop++;
7830                 }
7831
7832                 if (loop == LOOP_ALLOC_CHUNK) {
7833                         struct btrfs_trans_handle *trans;
7834                         int exist = 0;
7835
7836                         trans = current->journal_info;
7837                         if (trans)
7838                                 exist = 1;
7839                         else
7840                                 trans = btrfs_join_transaction(root);
7841
7842                         if (IS_ERR(trans)) {
7843                                 ret = PTR_ERR(trans);
7844                                 goto out;
7845                         }
7846
7847                         ret = do_chunk_alloc(trans, root, flags,
7848                                              CHUNK_ALLOC_FORCE);
7849
7850                         /*
7851                          * If we can't allocate a new chunk, we've already looped
7852                          * through at least once, so move on to the NO_EMPTY_SIZE
7853                          * case.
7854                          */
7855                         if (ret == -ENOSPC)
7856                                 loop = LOOP_NO_EMPTY_SIZE;
7857
7858                         /*
7859                          * Do not bail out on ENOSPC since we
7860                          * can still fall back to the NO_EMPTY_SIZE loop.
7861                          */
7862                         if (ret < 0 && ret != -ENOSPC)
7863                                 btrfs_abort_transaction(trans, ret);
7864                         else
7865                                 ret = 0;
7866                         if (!exist)
7867                                 btrfs_end_transaction(trans, root);
7868                         if (ret)
7869                                 goto out;
7870                 }
7871
7872                 if (loop == LOOP_NO_EMPTY_SIZE) {
7873                         /*
7874                          * Don't loop again if we already have no empty_size and
7875                          * no empty_cluster.
7876                          */
7877                         if (empty_size == 0 &&
7878                             empty_cluster == 0) {
7879                                 ret = -ENOSPC;
7880                                 goto out;
7881                         }
7882                         empty_size = 0;
7883                         empty_cluster = 0;
7884                 }
7885
7886                 goto search;
7887         } else if (!ins->objectid) {
7888                 ret = -ENOSPC;
7889         } else if (ins->objectid) {
7890                 if (!use_cluster && last_ptr) {
7891                         spin_lock(&last_ptr->lock);
7892                         last_ptr->window_start = ins->objectid;
7893                         spin_unlock(&last_ptr->lock);
7894                 }
7895                 ret = 0;
7896         }
7897 out:
7898         if (ret == -ENOSPC) {
7899                 spin_lock(&space_info->lock);
7900                 space_info->max_extent_size = max_extent_size;
7901                 spin_unlock(&space_info->lock);
7902                 ins->offset = max_extent_size;
7903         }
7904         return ret;
7905 }
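
/*
 * Illustrative note (annotation, not in the original source): the allocator
 * escalates through the LOOP_* stages in order.  A failed pass over all
 * raid-type lists restarts at index 0 with a more aggressive stage:
 *
 *     LOOP_CACHING_NOWAIT -> LOOP_CACHING_WAIT (or straight to
 *     LOOP_ALLOC_CHUNK when every block group was already cached and a
 *     full search was done) -> LOOP_ALLOC_CHUNK -> LOOP_NO_EMPTY_SIZE
 *
 * On success, ins->objectid holds the start of the reservation and
 * ins->offset its length; on -ENOSPC, ins->offset is reused to report
 * max_extent_size back to the caller.
 */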
7906
7907 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
7908                             int dump_block_groups)
7909 {
7910         struct btrfs_block_group_cache *cache;
7911         int index = 0;
7912
7913         spin_lock(&info->lock);
7914         printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
7915                info->flags,
7916                info->total_bytes - info->bytes_used - info->bytes_pinned -
7917                info->bytes_reserved - info->bytes_readonly -
7918                info->bytes_may_use, (info->full) ? "" : "not ");
7919         printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
7920                "reserved=%llu, may_use=%llu, readonly=%llu\n",
7921                info->total_bytes, info->bytes_used, info->bytes_pinned,
7922                info->bytes_reserved, info->bytes_may_use,
7923                info->bytes_readonly);
7924         spin_unlock(&info->lock);
7925
7926         if (!dump_block_groups)
7927                 return;
7928
7929         down_read(&info->groups_sem);
7930 again:
7931         list_for_each_entry(cache, &info->block_groups[index], list) {
7932                 spin_lock(&cache->lock);
7933                 printk(KERN_INFO "BTRFS: block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s\n",
7936                        cache->key.objectid, cache->key.offset,
7937                        btrfs_block_group_used(&cache->item), cache->pinned,
7938                        cache->reserved, cache->ro ? "[readonly]" : "");
7939                 btrfs_dump_free_space(cache, bytes);
7940                 spin_unlock(&cache->lock);
7941         }
7942         if (++index < BTRFS_NR_RAID_TYPES)
7943                 goto again;
7944         up_read(&info->groups_sem);
7945 }
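
/*
 * Sketch (annotation): the "free" figure printed by dump_space_info() is
 * derived from the space_info counters as
 *
 *     free = total_bytes - bytes_used - bytes_pinned - bytes_reserved
 *            - bytes_readonly - bytes_may_use
 *
 * i.e. everything not yet consumed, pinned by the running transaction,
 * reserved by in-flight allocations, read-only, or speculatively reserved
 * for delalloc.
 */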
7946
7947 int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
7948                          u64 num_bytes, u64 min_alloc_size,
7949                          u64 empty_size, u64 hint_byte,
7950                          struct btrfs_key *ins, int is_data, int delalloc)
7951 {
7952         bool final_tried = num_bytes == min_alloc_size;
7953         u64 flags;
7954         int ret;
7955
7956         flags = btrfs_get_alloc_profile(root, is_data);
7957 again:
7958         WARN_ON(num_bytes < root->sectorsize);
7959         ret = find_free_extent(root, ram_bytes, num_bytes, empty_size,
7960                                hint_byte, ins, flags, delalloc);
7961         if (!ret && !is_data) {
7962                 btrfs_dec_block_group_reservations(root->fs_info,
7963                                                    ins->objectid);
7964         } else if (ret == -ENOSPC) {
7965                 if (!final_tried && ins->offset) {
7966                         num_bytes = min(num_bytes >> 1, ins->offset);
7967                         num_bytes = round_down(num_bytes, root->sectorsize);
7968                         num_bytes = max(num_bytes, min_alloc_size);
7969                         ram_bytes = num_bytes;
7970                         if (num_bytes == min_alloc_size)
7971                                 final_tried = true;
7972                         goto again;
7973                 } else if (btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
7974                         struct btrfs_space_info *sinfo;
7975
7976                         sinfo = __find_space_info(root->fs_info, flags);
7977                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
7978                                 flags, num_bytes);
7979                         if (sinfo)
7980                                 dump_space_info(sinfo, num_bytes, 1);
7981                 }
7982         }
7983
7984         return ret;
7985 }
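
/*
 * Worked example (annotation, not from the original source): with
 * min_alloc_size == sectorsize, a failed 1 MiB reservation retries
 * roughly as follows:
 *
 *     num_bytes = min(1 MiB >> 1, ins->offset);   // cap at largest hole seen
 *     num_bytes = round_down(num_bytes, sectorsize);
 *     num_bytes = max(num_bytes, min_alloc_size);
 *
 * halving on each pass until num_bytes == min_alloc_size, after which a
 * further -ENOSPC is returned to the caller (final_tried).
 */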
7986
7987 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
7988                                         u64 start, u64 len,
7989                                         int pin, int delalloc)
7990 {
7991         struct btrfs_block_group_cache *cache;
7992         int ret = 0;
7993
7994         cache = btrfs_lookup_block_group(root->fs_info, start);
7995         if (!cache) {
7996                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
7997                         start);
7998                 return -ENOSPC;
7999         }
8000
8001         if (pin) {
8002                 pin_down_extent(root, cache, start, len, 1);
8003         } else {
8004                 if (btrfs_test_opt(root->fs_info, DISCARD))
8005                         ret = btrfs_discard_extent(root, start, len, NULL);
8006                 btrfs_add_free_space(cache, start, len);
8007                 btrfs_free_reserved_bytes(cache, len, delalloc);
8008                 trace_btrfs_reserved_extent_free(root, start, len);
8009         }
8010
8011         btrfs_put_block_group(cache);
8012         return ret;
8013 }
8014
8015 int btrfs_free_reserved_extent(struct btrfs_root *root,
8016                                u64 start, u64 len, int delalloc)
8017 {
8018         return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
8019 }
8020
8021 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
8022                                        u64 start, u64 len)
8023 {
8024         return __btrfs_free_reserved_extent(root, start, len, 1, 0);
8025 }
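
/*
 * Annotation: both wrappers funnel into __btrfs_free_reserved_extent().
 * btrfs_free_reserved_extent() returns the range to the free-space cache
 * immediately (optionally discarding it), while
 * btrfs_free_and_pin_reserved_extent() pins it instead, so the space only
 * becomes reusable once the current transaction commits (a btrfs
 * invariant assumed here, not spelled out in this file).
 */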
8026
8027 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
8028                                       struct btrfs_root *root,
8029                                       u64 parent, u64 root_objectid,
8030                                       u64 flags, u64 owner, u64 offset,
8031                                       struct btrfs_key *ins, int ref_mod)
8032 {
8033         int ret;
8034         struct btrfs_fs_info *fs_info = root->fs_info;
8035         struct btrfs_extent_item *extent_item;
8036         struct btrfs_extent_inline_ref *iref;
8037         struct btrfs_path *path;
8038         struct extent_buffer *leaf;
8039         int type;
8040         u32 size;
8041
8042         if (parent > 0)
8043                 type = BTRFS_SHARED_DATA_REF_KEY;
8044         else
8045                 type = BTRFS_EXTENT_DATA_REF_KEY;
8046
8047         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
8048
8049         path = btrfs_alloc_path();
8050         if (!path)
8051                 return -ENOMEM;
8052
8053         path->leave_spinning = 1;
8054         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
8055                                       ins, size);
8056         if (ret) {
8057                 btrfs_free_path(path);
8058                 return ret;
8059         }
8060
8061         leaf = path->nodes[0];
8062         extent_item = btrfs_item_ptr(leaf, path->slots[0],
8063                                      struct btrfs_extent_item);
8064         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
8065         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
8066         btrfs_set_extent_flags(leaf, extent_item,
8067                                flags | BTRFS_EXTENT_FLAG_DATA);
8068
8069         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
8070         btrfs_set_extent_inline_ref_type(leaf, iref, type);
8071         if (parent > 0) {
8072                 struct btrfs_shared_data_ref *ref;
8073                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
8074                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
8075                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
8076         } else {
8077                 struct btrfs_extent_data_ref *ref;
8078                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
8079                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
8080                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
8081                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
8082                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
8083         }
8084
8085         btrfs_mark_buffer_dirty(leaf);
8086         btrfs_free_path(path);
8087
8088         ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
8089                                           ins->offset);
8090         if (ret)
8091                 return ret;
8092
8093         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
8094         if (ret) { /* -ENOENT, logic error */
8095                 btrfs_err(fs_info, "update block group failed for %llu %llu",
8096                         ins->objectid, ins->offset);
8097                 BUG();
8098         }
8099         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
8100         return ret;
8101 }
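
/*
 * Layout sketch (annotation): the item inserted above packs the backref
 * inline after the extent item, so a data extent with a single ref is
 * one of:
 *
 *     [btrfs_extent_item][iref: SHARED_DATA_REF_KEY][btrfs_shared_data_ref]
 *     [btrfs_extent_item][iref: EXTENT_DATA_REF_KEY][btrfs_extent_data_ref]
 *
 * which is why size is computed as sizeof(*extent_item) +
 * btrfs_extent_inline_ref_size(type).
 */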
8102
8103 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
8104                                      struct btrfs_root *root,
8105                                      u64 parent, u64 root_objectid,
8106                                      u64 flags, struct btrfs_disk_key *key,
8107                                      int level, struct btrfs_key *ins)
8108 {
8109         int ret;
8110         struct btrfs_fs_info *fs_info = root->fs_info;
8111         struct btrfs_extent_item *extent_item;
8112         struct btrfs_tree_block_info *block_info;
8113         struct btrfs_extent_inline_ref *iref;
8114         struct btrfs_path *path;
8115         struct extent_buffer *leaf;
8116         u32 size = sizeof(*extent_item) + sizeof(*iref);
8117         u64 num_bytes = ins->offset;
8118         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
8119                                                  SKINNY_METADATA);
8120
8121         if (!skinny_metadata)
8122                 size += sizeof(*block_info);
8123
8124         path = btrfs_alloc_path();
8125         if (!path) {
8126                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
8127                                                    root->nodesize);
8128                 return -ENOMEM;
8129         }
8130
8131         path->leave_spinning = 1;
8132         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
8133                                       ins, size);
8134         if (ret) {
8135                 btrfs_free_path(path);
8136                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
8137                                                    root->nodesize);
8138                 return ret;
8139         }
8140
8141         leaf = path->nodes[0];
8142         extent_item = btrfs_item_ptr(leaf, path->slots[0],
8143                                      struct btrfs_extent_item);
8144         btrfs_set_extent_refs(leaf, extent_item, 1);
8145         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
8146         btrfs_set_extent_flags(leaf, extent_item,
8147                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
8148
8149         if (skinny_metadata) {
8150                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
8151                 num_bytes = root->nodesize;
8152         } else {
8153                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
8154                 btrfs_set_tree_block_key(leaf, block_info, key);
8155                 btrfs_set_tree_block_level(leaf, block_info, level);
8156                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
8157         }
8158
8159         if (parent > 0) {
8160                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
8161                 btrfs_set_extent_inline_ref_type(leaf, iref,
8162                                                  BTRFS_SHARED_BLOCK_REF_KEY);
8163                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
8164         } else {
8165                 btrfs_set_extent_inline_ref_type(leaf, iref,
8166                                                  BTRFS_TREE_BLOCK_REF_KEY);
8167                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
8168         }
8169
8170         btrfs_mark_buffer_dirty(leaf);
8171         btrfs_free_path(path);
8172
8173         ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
8174                                           num_bytes);
8175         if (ret)
8176                 return ret;
8177
8178         ret = update_block_group(trans, root, ins->objectid, root->nodesize,
8179                                  1);
8180         if (ret) { /* -ENOENT, logic error */
8181                 btrfs_err(fs_info, "update block group failed for %llu %llu",
8182                         ins->objectid, ins->offset);
8183                 BUG();
8184         }
8185
8186         trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
8187         return ret;
8188 }
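
/*
 * Layout sketch (annotation): for metadata the item shape depends on the
 * SKINNY_METADATA incompat flag:
 *
 *     skinny:      [btrfs_extent_item][inline ref]
 *     non-skinny:  [btrfs_extent_item][btrfs_tree_block_info][inline ref]
 *
 * With skinny metadata the level is carried in the item key itself, so
 * ins->offset holds the level rather than a byte count and num_bytes is
 * taken from root->nodesize instead.
 */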
8189
8190 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
8191                                      struct btrfs_root *root,
8192                                      u64 root_objectid, u64 owner,
8193                                      u64 offset, u64 ram_bytes,
8194                                      struct btrfs_key *ins)
8195 {
8196         int ret;
8197
8198         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
8199
8200         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
8201                                          ins->offset, 0,
8202                                          root_objectid, owner, offset,
8203                                          ram_bytes, BTRFS_ADD_DELAYED_EXTENT,
8204                                          NULL);
8205         return ret;
8206 }
8207
8208 /*
8209  * this is used by the tree logging recovery code.  It records that
8210  * an extent has been allocated and makes sure to clear the free
8211  * space cache bits as well
8212  */
8213 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
8214                                    struct btrfs_root *root,
8215                                    u64 root_objectid, u64 owner, u64 offset,
8216                                    struct btrfs_key *ins)
8217 {
8218         int ret;
8219         struct btrfs_block_group_cache *block_group;
8220         struct btrfs_space_info *space_info;
8221
8222         /*
8223          * Mixed block groups will have their extents excluded before processing
8224          * the log, so we only need to do the exclude dance if this fs isn't mixed.
8225          */
8226         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
8227                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
8228                 if (ret)
8229                         return ret;
8230         }
8231
8232         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
8233         if (!block_group)
8234                 return -EINVAL;
8235
8236         space_info = block_group->space_info;
8237         spin_lock(&space_info->lock);
8238         spin_lock(&block_group->lock);
8239         space_info->bytes_reserved += ins->offset;
8240         block_group->reserved += ins->offset;
8241         spin_unlock(&block_group->lock);
8242         spin_unlock(&space_info->lock);
8243
8244         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
8245                                          0, owner, offset, ins, 1);
8246         btrfs_put_block_group(block_group);
8247         return ret;
8248 }
8249
8250 static struct extent_buffer *
8251 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
8252                       u64 bytenr, int level)
8253 {
8254         struct extent_buffer *buf;
8255
8256         buf = btrfs_find_create_tree_block(root, bytenr);
8257         if (IS_ERR(buf))
8258                 return buf;
8259
8260         btrfs_set_header_generation(buf, trans->transid);
8261         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
8262         btrfs_tree_lock(buf);
8263         clean_tree_block(trans, root->fs_info, buf);
8264         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
8265
8266         btrfs_set_lock_blocking(buf);
8267         set_extent_buffer_uptodate(buf);
8268
8269         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
8270                 buf->log_index = root->log_transid % 2;
8271                 /*
8272                  * we allow two log transactions at a time; use a different
8273                  * EXTENT bit to differentiate dirty pages.
8274                  */
8275                 if (buf->log_index == 0)
8276                         set_extent_dirty(&root->dirty_log_pages, buf->start,
8277                                         buf->start + buf->len - 1, GFP_NOFS);
8278                 else
8279                         set_extent_new(&root->dirty_log_pages, buf->start,
8280                                         buf->start + buf->len - 1);
8281         } else {
8282                 buf->log_index = -1;
8283                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
8284                          buf->start + buf->len - 1, GFP_NOFS);
8285         }
8286         trans->dirty = true;
8287         /* this returns a buffer locked for blocking */
8288         return buf;
8289 }
8290
8291 static struct btrfs_block_rsv *
8292 use_block_rsv(struct btrfs_trans_handle *trans,
8293               struct btrfs_root *root, u32 blocksize)
8294 {
8295         struct btrfs_block_rsv *block_rsv;
8296         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
8297         int ret;
8298         bool global_updated = false;
8299
8300         block_rsv = get_block_rsv(trans, root);
8301
8302         if (unlikely(block_rsv->size == 0))
8303                 goto try_reserve;
8304 again:
8305         ret = block_rsv_use_bytes(block_rsv, blocksize);
8306         if (!ret)
8307                 return block_rsv;
8308
8309         if (block_rsv->failfast)
8310                 return ERR_PTR(ret);
8311
8312         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
8313                 global_updated = true;
8314                 update_global_block_rsv(root->fs_info);
8315                 goto again;
8316         }
8317
8318         if (btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
8319                 static DEFINE_RATELIMIT_STATE(_rs,
8320                                 DEFAULT_RATELIMIT_INTERVAL * 10,
8321                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
8322                 if (__ratelimit(&_rs))
8323                         WARN(1, KERN_DEBUG
8324                                 "BTRFS: block rsv returned %d\n", ret);
8325         }
8326 try_reserve:
8327         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
8328                                      BTRFS_RESERVE_NO_FLUSH);
8329         if (!ret)
8330                 return block_rsv;
8331         /*
8332          * If we couldn't reserve metadata bytes try and use some from
8333          * the global reserve if its space type is the same as the global
8334          * reservation.
8335          */
8336         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
8337             block_rsv->space_info == global_rsv->space_info) {
8338                 ret = block_rsv_use_bytes(global_rsv, blocksize);
8339                 if (!ret)
8340                         return global_rsv;
8341         }
8342         return ERR_PTR(ret);
8343 }
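
/*
 * Annotation: the reservation fallback order implemented above is:
 *
 *   1. take blocksize bytes from the root's own block_rsv;
 *   2. if that rsv is the global one, refresh it once and retry;
 *   3. reserve fresh metadata bytes with BTRFS_RESERVE_NO_FLUSH;
 *   4. as a last resort, borrow from the global reserve when it shares
 *      the same space_info.
 *
 * failfast reservations skip everything past step 1.
 */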
8344
8345 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
8346                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
8347 {
8348         block_rsv_add_bytes(block_rsv, blocksize, 0);
8349         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
8350 }
8351
8352 /*
8353  * finds a free extent and does all the dirty work required for allocation;
8354  * returns the tree buffer or an ERR_PTR on error.
8355  */
8356 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
8357                                         struct btrfs_root *root,
8358                                         u64 parent, u64 root_objectid,
8359                                         struct btrfs_disk_key *key, int level,
8360                                         u64 hint, u64 empty_size)
8361 {
8362         struct btrfs_key ins;
8363         struct btrfs_block_rsv *block_rsv;
8364         struct extent_buffer *buf;
8365         struct btrfs_delayed_extent_op *extent_op;
8366         u64 flags = 0;
8367         int ret;
8368         u32 blocksize = root->nodesize;
8369         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
8370                                                  SKINNY_METADATA);
8371
8372 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
8373         if (btrfs_is_testing(root->fs_info)) {
8374                 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
8375                                             level);
8376                 if (!IS_ERR(buf))
8377                         root->alloc_bytenr += blocksize;
8378                 return buf;
8379         }
8380 #endif
8381
8382         block_rsv = use_block_rsv(trans, root, blocksize);
8383         if (IS_ERR(block_rsv))
8384                 return ERR_CAST(block_rsv);
8385
8386         ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize,
8387                                    empty_size, hint, &ins, 0, 0);
8388         if (ret)
8389                 goto out_unuse;
8390
8391         buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
8392         if (IS_ERR(buf)) {
8393                 ret = PTR_ERR(buf);
8394                 goto out_free_reserved;
8395         }
8396
8397         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
8398                 if (parent == 0)
8399                         parent = ins.objectid;
8400                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
8401         } else
8402                 BUG_ON(parent > 0);
8403
8404         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
8405                 extent_op = btrfs_alloc_delayed_extent_op();
8406                 if (!extent_op) {
8407                         ret = -ENOMEM;
8408                         goto out_free_buf;
8409                 }
8410                 if (key)
8411                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
8412                 else
8413                         memset(&extent_op->key, 0, sizeof(extent_op->key));
8414                 extent_op->flags_to_set = flags;
8415                 extent_op->update_key = !skinny_metadata;
8416                 extent_op->update_flags = true;
8417                 extent_op->is_data = false;
8418                 extent_op->level = level;
8419
8420                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
8421                                                  ins.objectid, ins.offset,
8422                                                  parent, root_objectid, level,
8423                                                  BTRFS_ADD_DELAYED_EXTENT,
8424                                                  extent_op);
8425                 if (ret)
8426                         goto out_free_delayed;
8427         }
8428         return buf;
8429
8430 out_free_delayed:
8431         btrfs_free_delayed_extent_op(extent_op);
8432 out_free_buf:
8433         free_extent_buffer(buf);
8434 out_free_reserved:
8435         btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
8436 out_unuse:
8437         unuse_block_rsv(root->fs_info, block_rsv, blocksize);
8438         return ERR_PTR(ret);
8439 }
8440
8441 struct walk_control {
8442         u64 refs[BTRFS_MAX_LEVEL];
8443         u64 flags[BTRFS_MAX_LEVEL];
8444         struct btrfs_key update_progress;
8445         int stage;
8446         int level;
8447         int shared_level;
8448         int update_ref;
8449         int keep_locks;
8450         int reada_slot;
8451         int reada_count;
8452         int for_reloc;
8453 };
8454
8455 #define DROP_REFERENCE  1
8456 #define UPDATE_BACKREF  2
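
/*
 * Annotation: a snapshot drop runs as a state machine over these two
 * stages.  DROP_REFERENCE walks down dropping refs on exclusively-owned
 * blocks; when it meets a shared block whose subtree still needs backref
 * fixups (update_ref), do_walk_down() flips wc->stage to UPDATE_BACKREF
 * for that subtree, and walk_up_proc() flips it back to DROP_REFERENCE
 * once the shared level has been fully processed.
 */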
8457
8458 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
8459                                      struct btrfs_root *root,
8460                                      struct walk_control *wc,
8461                                      struct btrfs_path *path)
8462 {
8463         u64 bytenr;
8464         u64 generation;
8465         u64 refs;
8466         u64 flags;
8467         u32 nritems;
8468         u32 blocksize;
8469         struct btrfs_key key;
8470         struct extent_buffer *eb;
8471         int ret;
8472         int slot;
8473         int nread = 0;
8474
8475         if (path->slots[wc->level] < wc->reada_slot) {
8476                 wc->reada_count = wc->reada_count * 2 / 3;
8477                 wc->reada_count = max(wc->reada_count, 2);
8478         } else {
8479                 wc->reada_count = wc->reada_count * 3 / 2;
8480                 wc->reada_count = min_t(int, wc->reada_count,
8481                                         BTRFS_NODEPTRS_PER_BLOCK(root));
8482         }
8483
8484         eb = path->nodes[wc->level];
8485         nritems = btrfs_header_nritems(eb);
8486         blocksize = root->nodesize;
8487
8488         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
8489                 if (nread >= wc->reada_count)
8490                         break;
8491
8492                 cond_resched();
8493                 bytenr = btrfs_node_blockptr(eb, slot);
8494                 generation = btrfs_node_ptr_generation(eb, slot);
8495
8496                 if (slot == path->slots[wc->level])
8497                         goto reada;
8498
8499                 if (wc->stage == UPDATE_BACKREF &&
8500                     generation <= root->root_key.offset)
8501                         continue;
8502
8503                 /* We don't lock the tree block, it's OK to be racy here */
8504                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
8505                                                wc->level - 1, 1, &refs,
8506                                                &flags);
8507                 /* We don't care about errors in readahead. */
8508                 if (ret < 0)
8509                         continue;
8510                 BUG_ON(refs == 0);
8511
8512                 if (wc->stage == DROP_REFERENCE) {
8513                         if (refs == 1)
8514                                 goto reada;
8515
8516                         if (wc->level == 1 &&
8517                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8518                                 continue;
8519                         if (!wc->update_ref ||
8520                             generation <= root->root_key.offset)
8521                                 continue;
8522                         btrfs_node_key_to_cpu(eb, &key, slot);
8523                         ret = btrfs_comp_cpu_keys(&key,
8524                                                   &wc->update_progress);
8525                         if (ret < 0)
8526                                 continue;
8527                 } else {
8528                         if (wc->level == 1 &&
8529                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8530                                 continue;
8531                 }
8532 reada:
8533                 readahead_tree_block(root, bytenr);
8534                 nread++;
8535         }
8536         wc->reada_slot = slot;
8537 }
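
/*
 * Worked example (annotation): reada_count adapts to how far the walk has
 * advanced relative to the last readahead.  Starting from, say, 16:
 *
 *     walk still behind reada_slot:  16 * 2 / 3 = 10, floored at 2
 *     walk caught up or passed it:   16 * 3 / 2 = 24, capped at
 *                                    BTRFS_NODEPTRS_PER_BLOCK(root)
 *
 * so readahead throttles itself when it outruns the walk and ramps up
 * when the walk is keeping pace.
 */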
8538
8539 static int account_leaf_items(struct btrfs_trans_handle *trans,
8540                               struct btrfs_root *root,
8541                               struct extent_buffer *eb)
8542 {
8543         int nr = btrfs_header_nritems(eb);
8544         int i, extent_type, ret;
8545         struct btrfs_key key;
8546         struct btrfs_file_extent_item *fi;
8547         u64 bytenr, num_bytes;
8548
8549         /* We can be called directly from walk_up_proc() */
8550         if (!root->fs_info->quota_enabled)
8551                 return 0;
8552
8553         for (i = 0; i < nr; i++) {
8554                 btrfs_item_key_to_cpu(eb, &key, i);
8555
8556                 if (key.type != BTRFS_EXTENT_DATA_KEY)
8557                         continue;
8558
8559                 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
8560                 /* filter out non-qgroup-accountable extents */
8561                 extent_type = btrfs_file_extent_type(eb, fi);
8562
8563                 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
8564                         continue;
8565
8566                 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
8567                 if (!bytenr)
8568                         continue;
8569
8570                 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
8571
8572                 ret = btrfs_qgroup_insert_dirty_extent(trans, root->fs_info,
8573                                 bytenr, num_bytes, GFP_NOFS);
8574                 if (ret)
8575                         return ret;
8576         }
8577         return 0;
8578 }
8579
8580 /*
8581  * Walk up the tree from the bottom, freeing leaves and any interior
8582  * nodes which have had all slots visited. If a node (leaf or
8583  * interior) is freed, the node above it will have its slot
8584  * incremented. The root node will never be freed.
8585  *
8586  * At the end of this function, we should have a path which has all
8587  * slots incremented to the next position for a search. If we need to
8588  * read a new node it will be NULL and the node above it will have the
8589  * correct slot selected for a later read.
8590  *
8591  * If we increment the root node's slot counter past the number of
8592  * elements, 1 is returned to signal completion of the search.
8593  */
8594 static int adjust_slots_upwards(struct btrfs_root *root,
8595                                 struct btrfs_path *path, int root_level)
8596 {
8597         int level = 0;
8598         int nr, slot;
8599         struct extent_buffer *eb;
8600
8601         if (root_level == 0)
8602                 return 1;
8603
8604         while (level <= root_level) {
8605                 eb = path->nodes[level];
8606                 nr = btrfs_header_nritems(eb);
8607                 path->slots[level]++;
8608                 slot = path->slots[level];
8609                 if (slot >= nr || level == 0) {
8610                         /*
8611                          * Don't free the root - we will detect this
8612                          * condition after our loop and return a
8613                          * positive value for caller to stop walking the tree.
8614                          */
8615                         if (level != root_level) {
8616                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8617                                 path->locks[level] = 0;
8618
8619                                 free_extent_buffer(eb);
8620                                 path->nodes[level] = NULL;
8621                                 path->slots[level] = 0;
8622                         }
8623                 } else {
8624                         /*
8625                          * We have a valid slot to walk back down
8626                          * from. Stop here so caller can process these
8627                          * new nodes.
8628                          */
8629                         break;
8630                 }
8631
8632                 level++;
8633         }
8634
8635         eb = path->nodes[root_level];
8636         if (path->slots[root_level] >= btrfs_header_nritems(eb))
8637                 return 1;
8638
8639         return 0;
8640 }
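
/*
 * Worked example (annotation): with root_level == 2 and the level-0 and
 * level-1 nodes exhausted, the loop unlocks and frees the level-0 and
 * level-1 buffers, NULLs their path slots, and bumps path->slots[2].
 * Only if that final bump runs past nritems of the root node does the
 * function return 1 to end the search; otherwise the caller walks back
 * down from the first still-valid slot.
 */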
8641
8642 /*
8643  * root_eb is the subtree root and is locked before this function is called.
8644  */
8645 static int account_shared_subtree(struct btrfs_trans_handle *trans,
8646                                   struct btrfs_root *root,
8647                                   struct extent_buffer *root_eb,
8648                                   u64 root_gen,
8649                                   int root_level)
8650 {
8651         int ret = 0;
8652         int level;
8653         struct extent_buffer *eb = root_eb;
8654         struct btrfs_path *path = NULL;
8655
8656         BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
8657         BUG_ON(root_eb == NULL);
8658
8659         if (!root->fs_info->quota_enabled)
8660                 return 0;
8661
8662         if (!extent_buffer_uptodate(root_eb)) {
8663                 ret = btrfs_read_buffer(root_eb, root_gen);
8664                 if (ret)
8665                         goto out;
8666         }
8667
8668         if (root_level == 0) {
8669                 ret = account_leaf_items(trans, root, root_eb);
8670                 goto out;
8671         }
8672
8673         path = btrfs_alloc_path();
8674         if (!path)
8675                 return -ENOMEM;
8676
8677         /*
8678          * Walk down the tree.  Missing extent blocks are filled in as
8679          * we go. Metadata is accounted every time we read a new
8680          * extent block.
8681          *
8682          * When we reach a leaf, we account for file extent items in it,
8683          * walk back up the tree (adjusting slot pointers as we go)
8684          * and restart the search process.
8685          */
8686         extent_buffer_get(root_eb); /* For path */
8687         path->nodes[root_level] = root_eb;
8688         path->slots[root_level] = 0;
8689         path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
8690 walk_down:
8691         level = root_level;
8692         while (level >= 0) {
8693                 if (path->nodes[level] == NULL) {
8694                         int parent_slot;
8695                         u64 child_gen;
8696                         u64 child_bytenr;
8697
8698                         /* We need to get the child blockptr/gen from the
8699                          * parent before we can read it. */
8700                         eb = path->nodes[level + 1];
8701                         parent_slot = path->slots[level + 1];
8702                         child_bytenr = btrfs_node_blockptr(eb, parent_slot);
8703                         child_gen = btrfs_node_ptr_generation(eb, parent_slot);
8704
8705                         eb = read_tree_block(root, child_bytenr, child_gen);
8706                         if (IS_ERR(eb)) {
8707                                 ret = PTR_ERR(eb);
8708                                 goto out;
8709                         } else if (!extent_buffer_uptodate(eb)) {
8710                                 free_extent_buffer(eb);
8711                                 ret = -EIO;
8712                                 goto out;
8713                         }
8714
8715                         path->nodes[level] = eb;
8716                         path->slots[level] = 0;
8717
8718                         btrfs_tree_read_lock(eb);
8719                         btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
8720                         path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
8721
8722                         ret = btrfs_qgroup_insert_dirty_extent(trans,
8723                                         root->fs_info, child_bytenr,
8724                                         root->nodesize, GFP_NOFS);
8725                         if (ret)
8726                                 goto out;
8727                 }
8728
8729                 if (level == 0) {
8730                         ret = account_leaf_items(trans, root, path->nodes[level]);
8731                         if (ret)
8732                                 goto out;
8733
8734                         /* Nonzero return here means we completed our search */
8735                         ret = adjust_slots_upwards(root, path, root_level);
8736                         if (ret)
8737                                 break;
8738
8739                         /* Restart search with new slots */
8740                         goto walk_down;
8741                 }
8742
8743                 level--;
8744         }
8745
8746         ret = 0;
8747 out:
8748         btrfs_free_path(path);
8749
8750         return ret;
8751 }
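
/*
 * Annotation: the traversal above is a read-only analogue of the main
 * drop walk: every node read on the way down is handed to
 * btrfs_qgroup_insert_dirty_extent() so qgroup accounting sees the whole
 * shared subtree, leaves are accounted item by item via
 * account_leaf_items(), and adjust_slots_upwards() drives the climb back
 * up until the root slot is exhausted.
 */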
8752
8753 /*
8754  * helper to process tree block while walking down the tree.
8755  *
8756  * when wc->stage == UPDATE_BACKREF, this function updates
8757  * back refs for pointers in the block.
8758  *
8759  * NOTE: return value 1 means we should stop walking down.
8760  */
8761 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
8762                                    struct btrfs_root *root,
8763                                    struct btrfs_path *path,
8764                                    struct walk_control *wc, int lookup_info)
8765 {
8766         int level = wc->level;
8767         struct extent_buffer *eb = path->nodes[level];
8768         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8769         int ret;
8770
8771         if (wc->stage == UPDATE_BACKREF &&
8772             btrfs_header_owner(eb) != root->root_key.objectid)
8773                 return 1;
8774
8775         /*
8776          * when the reference count of a tree block is 1, it won't increase
8777          * again. once the full backref flag is set, we never clear it.
8778          */
8779         if (lookup_info &&
8780             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
8781              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
8782                 BUG_ON(!path->locks[level]);
8783                 ret = btrfs_lookup_extent_info(trans, root,
8784                                                eb->start, level, 1,
8785                                                &wc->refs[level],
8786                                                &wc->flags[level]);
8787                 BUG_ON(ret == -ENOMEM);
8788                 if (ret)
8789                         return ret;
8790                 BUG_ON(wc->refs[level] == 0);
8791         }
8792
8793         if (wc->stage == DROP_REFERENCE) {
8794                 if (wc->refs[level] > 1)
8795                         return 1;
8796
8797                 if (path->locks[level] && !wc->keep_locks) {
8798                         btrfs_tree_unlock_rw(eb, path->locks[level]);
8799                         path->locks[level] = 0;
8800                 }
8801                 return 0;
8802         }
8803
8804         /* wc->stage == UPDATE_BACKREF */
8805         if (!(wc->flags[level] & flag)) {
8806                 BUG_ON(!path->locks[level]);
8807                 ret = btrfs_inc_ref(trans, root, eb, 1);
8808                 BUG_ON(ret); /* -ENOMEM */
8809                 ret = btrfs_dec_ref(trans, root, eb, 0);
8810                 BUG_ON(ret); /* -ENOMEM */
8811                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
8812                                                   eb->len, flag,
8813                                                   btrfs_header_level(eb), 0);
8814                 BUG_ON(ret); /* -ENOMEM */
8815                 wc->flags[level] |= flag;
8816         }
8817
8818         /*
8819          * the block is shared by multiple trees, so it's not good to
8820          * keep the tree lock
8821          */
8822         if (path->locks[level] && level > 0) {
8823                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8824                 path->locks[level] = 0;
8825         }
8826         return 0;
8827 }
8828
8829 /*
8830  * helper to process tree block pointer.
8831  *
8832  * when wc->stage == DROP_REFERENCE, this function checks the
8833  * reference count of the block pointed to. if the block
8834  * is shared and we need to update back refs for the subtree
8835  * rooted at the block, this function changes wc->stage to
8836  * UPDATE_BACKREF. if the block is shared and there is no
8837  * need to update backrefs, this function drops the reference
8838  * to the block.
8839  *
8840  * NOTE: return value 1 means we should stop walking down.
8841  */
8842 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
8843                                  struct btrfs_root *root,
8844                                  struct btrfs_path *path,
8845                                  struct walk_control *wc, int *lookup_info)
8846 {
8847         u64 bytenr;
8848         u64 generation;
8849         u64 parent;
8850         u32 blocksize;
8851         struct btrfs_key key;
8852         struct extent_buffer *next;
8853         int level = wc->level;
8854         int reada = 0;
8855         int ret = 0;
8856         bool need_account = false;
8857
8858         generation = btrfs_node_ptr_generation(path->nodes[level],
8859                                                path->slots[level]);
8860         /*
8861          * if the lower level block was created before the snapshot
8862          * was created, we know there is no need to update back refs
8863          * for the subtree
8864          */
8865         if (wc->stage == UPDATE_BACKREF &&
8866             generation <= root->root_key.offset) {
8867                 *lookup_info = 1;
8868                 return 1;
8869         }
8870
8871         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
8872         blocksize = root->nodesize;
8873
8874         next = btrfs_find_tree_block(root->fs_info, bytenr);
8875         if (!next) {
8876                 next = btrfs_find_create_tree_block(root, bytenr);
8877                 if (IS_ERR(next))
8878                         return PTR_ERR(next);
8879
8880                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
8881                                                level - 1);
8882                 reada = 1;
8883         }
8884         btrfs_tree_lock(next);
8885         btrfs_set_lock_blocking(next);
8886
8887         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
8888                                        &wc->refs[level - 1],
8889                                        &wc->flags[level - 1]);
8890         if (ret < 0) {
8891                 btrfs_tree_unlock(next);
8892                 return ret;
8893         }
8894
8895         if (unlikely(wc->refs[level - 1] == 0)) {
8896                 btrfs_err(root->fs_info, "Missing references.");
8897                 BUG();
8898         }
8899         *lookup_info = 0;
8900
8901         if (wc->stage == DROP_REFERENCE) {
8902                 if (wc->refs[level - 1] > 1) {
8903                         need_account = true;
8904                         if (level == 1 &&
8905                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8906                                 goto skip;
8907
8908                         if (!wc->update_ref ||
8909                             generation <= root->root_key.offset)
8910                                 goto skip;
8911
8912                         btrfs_node_key_to_cpu(path->nodes[level], &key,
8913                                               path->slots[level]);
8914                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
8915                         if (ret < 0)
8916                                 goto skip;
8917
8918                         wc->stage = UPDATE_BACKREF;
8919                         wc->shared_level = level - 1;
8920                 }
8921         } else {
8922                 if (level == 1 &&
8923                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8924                         goto skip;
8925         }
8926
8927         if (!btrfs_buffer_uptodate(next, generation, 0)) {
8928                 btrfs_tree_unlock(next);
8929                 free_extent_buffer(next);
8930                 next = NULL;
8931                 *lookup_info = 1;
8932         }
8933
8934         if (!next) {
8935                 if (reada && level == 1)
8936                         reada_walk_down(trans, root, wc, path);
8937                 next = read_tree_block(root, bytenr, generation);
8938                 if (IS_ERR(next)) {
8939                         return PTR_ERR(next);
8940                 } else if (!extent_buffer_uptodate(next)) {
8941                         free_extent_buffer(next);
8942                         return -EIO;
8943                 }
8944                 btrfs_tree_lock(next);
8945                 btrfs_set_lock_blocking(next);
8946         }
8947
8948         level--;
8949         BUG_ON(level != btrfs_header_level(next));
8950         path->nodes[level] = next;
8951         path->slots[level] = 0;
8952         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8953         wc->level = level;
8954         if (wc->level == 1)
8955                 wc->reada_slot = 0;
8956         return 0;
8957 skip:
8958         wc->refs[level - 1] = 0;
8959         wc->flags[level - 1] = 0;
8960         if (wc->stage == DROP_REFERENCE) {
8961                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
8962                         parent = path->nodes[level]->start;
8963                 } else {
8964                         BUG_ON(root->root_key.objectid !=
8965                                btrfs_header_owner(path->nodes[level]));
8966                         parent = 0;
8967                 }
8968
8969                 if (need_account) {
8970                         ret = account_shared_subtree(trans, root, next,
8971                                                      generation, level - 1);
8972                         if (ret) {
8973                                 btrfs_err_rl(root->fs_info,
8974                                         "Error %d accounting shared subtree. Quota is out of sync, rescan required.",
8975                                         ret);
8978                         }
8979                 }
8980                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
8981                                 root->root_key.objectid, level - 1, 0);
8982                 BUG_ON(ret); /* -ENOMEM */
8983         }
8984         btrfs_tree_unlock(next);
8985         free_extent_buffer(next);
8986         *lookup_info = 1;
8987         return 1;
8988 }
8989
8990 /*
8991  * helper to process tree block while walking up the tree.
8992  *
8993  * when wc->stage == DROP_REFERENCE, this function drops
8994  * reference count on the block.
8995  *
8996  * when wc->stage == UPDATE_BACKREF, this function changes
8997  * wc->stage back to DROP_REFERENCE if we changed wc->stage
8998  * to UPDATE_BACKREF previously while processing the block.
8999  *
9000  * NOTE: return value 1 means we should stop walking up.
9001  */
9002 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
9003                                  struct btrfs_root *root,
9004                                  struct btrfs_path *path,
9005                                  struct walk_control *wc)
9006 {
9007         int ret;
9008         int level = wc->level;
9009         struct extent_buffer *eb = path->nodes[level];
9010         u64 parent = 0;
9011
9012         if (wc->stage == UPDATE_BACKREF) {
9013                 BUG_ON(wc->shared_level < level);
9014                 if (level < wc->shared_level)
9015                         goto out;
9016
9017                 ret = find_next_key(path, level + 1, &wc->update_progress);
9018                 if (ret > 0)
9019                         wc->update_ref = 0;
9020
9021                 wc->stage = DROP_REFERENCE;
9022                 wc->shared_level = -1;
9023                 path->slots[level] = 0;
9024
9025                 /*
9026                  * check reference count again if the block isn't locked.
9027                  * we should start walking down the tree again if reference
9028                  * count is one.
9029                  */
9030                 if (!path->locks[level]) {
9031                         BUG_ON(level == 0);
9032                         btrfs_tree_lock(eb);
9033                         btrfs_set_lock_blocking(eb);
9034                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9035
9036                         ret = btrfs_lookup_extent_info(trans, root,
9037                                                        eb->start, level, 1,
9038                                                        &wc->refs[level],
9039                                                        &wc->flags[level]);
9040                         if (ret < 0) {
9041                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
9042                                 path->locks[level] = 0;
9043                                 return ret;
9044                         }
9045                         BUG_ON(wc->refs[level] == 0);
9046                         if (wc->refs[level] == 1) {
9047                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
9048                                 path->locks[level] = 0;
9049                                 return 1;
9050                         }
9051                 }
9052         }
9053
9054         /* wc->stage == DROP_REFERENCE */
9055         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
9056
9057         if (wc->refs[level] == 1) {
9058                 if (level == 0) {
9059                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
9060                                 ret = btrfs_dec_ref(trans, root, eb, 1);
9061                         else
9062                                 ret = btrfs_dec_ref(trans, root, eb, 0);
9063                         BUG_ON(ret); /* -ENOMEM */
9064                         ret = account_leaf_items(trans, root, eb);
9065                         if (ret) {
9066                                 btrfs_err_rl(root->fs_info,
9067                                         "error %d accounting leaf items. Quota is out of sync, rescan required.",
9068                                         ret);
9071                         }
9072                 }
9073                 /* make block locked assertion in clean_tree_block happy */
9074                 if (!path->locks[level] &&
9075                     btrfs_header_generation(eb) == trans->transid) {
9076                         btrfs_tree_lock(eb);
9077                         btrfs_set_lock_blocking(eb);
9078                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9079                 }
9080                 clean_tree_block(trans, root->fs_info, eb);
9081         }
9082
9083         if (eb == root->node) {
9084                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
9085                         parent = eb->start;
9086                 else
9087                         BUG_ON(root->root_key.objectid !=
9088                                btrfs_header_owner(eb));
9089         } else {
9090                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
9091                         parent = path->nodes[level + 1]->start;
9092                 else
9093                         BUG_ON(root->root_key.objectid !=
9094                                btrfs_header_owner(path->nodes[level + 1]));
9095         }
9096
9097         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
9098 out:
9099         wc->refs[level] = 0;
9100         wc->flags[level] = 0;
9101         return 0;
9102 }
9103
9104 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
9105                                    struct btrfs_root *root,
9106                                    struct btrfs_path *path,
9107                                    struct walk_control *wc)
9108 {
9109         int level = wc->level;
9110         int lookup_info = 1;
9111         int ret;
9112
9113         while (level >= 0) {
9114                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
9115                 if (ret > 0)
9116                         break;
9117
9118                 if (level == 0)
9119                         break;
9120
9121                 if (path->slots[level] >=
9122                     btrfs_header_nritems(path->nodes[level]))
9123                         break;
9124
9125                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
9126                 if (ret > 0) {
9127                         path->slots[level]++;
9128                         continue;
9129                 } else if (ret < 0)
9130                         return ret;
9131                 level = wc->level;
9132         }
9133         return 0;
9134 }
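
/*
 * Annotation: the return-value contract driving the loop above is that
 * walk_down_proc() > 0 means "stop descending here" (shared block, or a
 * leaf/exhausted node), do_walk_down() > 0 means "child was skipped,
 * advance to the next slot at this level", and a negative return from
 * do_walk_down() aborts the whole walk.
 */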
9135
9136 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
9137                                  struct btrfs_root *root,
9138                                  struct btrfs_path *path,
9139                                  struct walk_control *wc, int max_level)
9140 {
9141         int level = wc->level;
9142         int ret;
9143
9144         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
9145         while (level < max_level && path->nodes[level]) {
9146                 wc->level = level;
9147                 if (path->slots[level] + 1 <
9148                     btrfs_header_nritems(path->nodes[level])) {
9149                         path->slots[level]++;
9150                         return 0;
9151                 } else {
9152                         ret = walk_up_proc(trans, root, path, wc);
9153                         if (ret > 0)
9154                                 return 0;
9155
9156                         if (path->locks[level]) {
9157                                 btrfs_tree_unlock_rw(path->nodes[level],
9158                                                      path->locks[level]);
9159                                 path->locks[level] = 0;
9160                         }
9161                         free_extent_buffer(path->nodes[level]);
9162                         path->nodes[level] = NULL;
9163                         level++;
9164                 }
9165         }
9166         return 1;
9167 }
9168
9169 /*
9170  * drop a subvolume tree.
9171  *
9172  * this function traverses the tree, freeing any blocks that are only
9173  * referenced by the tree.
9174  *
9175  * when a shared tree block is found, this function decreases its
9176  * reference count by one. if update_ref is true, this function
9177  * also makes sure backrefs for the shared block and all lower level
9178  * blocks are properly updated.
9179  *
9180  * If called with for_reloc == 0, may exit early with -EAGAIN
9181  */
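/*
 * A rough usage sketch, modeled on btrfs_clean_one_deleted_snapshot()
 * in transaction.c:
 *
 *	if (btrfs_header_backref_rev(root->node) < BTRFS_MIXED_BACKREF_REV)
 *		ret = btrfs_drop_snapshot(root, NULL, 0, 0);
 *	else
 *		ret = btrfs_drop_snapshot(root, NULL, 1, 0);
 *
 * On -EAGAIN the root is put back on the dead roots list (see the out
 * label below) so the cleaner can retry the drop later.
 */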
9182 int btrfs_drop_snapshot(struct btrfs_root *root,
9183                          struct btrfs_block_rsv *block_rsv, int update_ref,
9184                          int for_reloc)
9185 {
9186         struct btrfs_path *path;
9187         struct btrfs_trans_handle *trans;
9188         struct btrfs_root *tree_root = root->fs_info->tree_root;
9189         struct btrfs_root_item *root_item = &root->root_item;
9190         struct walk_control *wc;
9191         struct btrfs_key key;
9192         int err = 0;
9193         int ret;
9194         int level;
9195         bool root_dropped = false;
9196
9197         btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
9198
9199         path = btrfs_alloc_path();
9200         if (!path) {
9201                 err = -ENOMEM;
9202                 goto out;
9203         }
9204
9205         wc = kzalloc(sizeof(*wc), GFP_NOFS);
9206         if (!wc) {
9207                 btrfs_free_path(path);
9208                 err = -ENOMEM;
9209                 goto out;
9210         }
9211
9212         trans = btrfs_start_transaction(tree_root, 0);
9213         if (IS_ERR(trans)) {
9214                 err = PTR_ERR(trans);
9215                 goto out_free;
9216         }
9217
9218         if (block_rsv)
9219                 trans->block_rsv = block_rsv;
9220
9221         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
9222                 level = btrfs_header_level(root->node);
9223                 path->nodes[level] = btrfs_lock_root_node(root);
9224                 btrfs_set_lock_blocking(path->nodes[level]);
9225                 path->slots[level] = 0;
9226                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9227                 memset(&wc->update_progress, 0,
9228                        sizeof(wc->update_progress));
9229         } else {
9230                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
9231                 memcpy(&wc->update_progress, &key,
9232                        sizeof(wc->update_progress));
9233
9234                 level = root_item->drop_level;
9235                 BUG_ON(level == 0);
9236                 path->lowest_level = level;
9237                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
9238                 path->lowest_level = 0;
9239                 if (ret < 0) {
9240                         err = ret;
9241                         goto out_end_trans;
9242                 }
9243                 WARN_ON(ret > 0);
9244
9245                 /*
9246                  * unlock our path; this is safe because only this
9247                  * function is allowed to delete this snapshot
9248                  */
9249                 btrfs_unlock_up_safe(path, 0);
9250
9251                 level = btrfs_header_level(root->node);
9252                 while (1) {
9253                         btrfs_tree_lock(path->nodes[level]);
9254                         btrfs_set_lock_blocking(path->nodes[level]);
9255                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9256
9257                         ret = btrfs_lookup_extent_info(trans, root,
9258                                                 path->nodes[level]->start,
9259                                                 level, 1, &wc->refs[level],
9260                                                 &wc->flags[level]);
9261                         if (ret < 0) {
9262                                 err = ret;
9263                                 goto out_end_trans;
9264                         }
9265                         BUG_ON(wc->refs[level] == 0);
9266
9267                         if (level == root_item->drop_level)
9268                                 break;
9269
9270                         btrfs_tree_unlock(path->nodes[level]);
9271                         path->locks[level] = 0;
9272                         WARN_ON(wc->refs[level] != 1);
9273                         level--;
9274                 }
9275         }
9276
9277         wc->level = level;
9278         wc->shared_level = -1;
9279         wc->stage = DROP_REFERENCE;
9280         wc->update_ref = update_ref;
9281         wc->keep_locks = 0;
9282         wc->for_reloc = for_reloc;
9283         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
9284
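        /*
         * Alternate walking down and walking up the tree.  After each
         * pass the position is saved in root_item->drop_progress so an
         * interrupted drop (transaction end, cleaner sleep, crash) can
         * resume from the saved key instead of starting over.
         */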
9285         while (1) {
9286
9287                 ret = walk_down_tree(trans, root, path, wc);
9288                 if (ret < 0) {
9289                         err = ret;
9290                         break;
9291                 }
9292
9293                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
9294                 if (ret < 0) {
9295                         err = ret;
9296                         break;
9297                 }
9298
9299                 if (ret > 0) {
9300                         BUG_ON(wc->stage != DROP_REFERENCE);
9301                         break;
9302                 }
9303
9304                 if (wc->stage == DROP_REFERENCE) {
9305                         level = wc->level;
9306                         btrfs_node_key(path->nodes[level],
9307                                        &root_item->drop_progress,
9308                                        path->slots[level]);
9309                         root_item->drop_level = level;
9310                 }
9311
9312                 BUG_ON(wc->level == 0);
9313                 if (btrfs_should_end_transaction(trans, tree_root) ||
9314                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
9315                         ret = btrfs_update_root(trans, tree_root,
9316                                                 &root->root_key,
9317                                                 root_item);
9318                         if (ret) {
9319                                 btrfs_abort_transaction(trans, ret);
9320                                 err = ret;
9321                                 goto out_end_trans;
9322                         }
9323
9324                         btrfs_end_transaction_throttle(trans, tree_root);
9325                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
9326                                 pr_debug("BTRFS: drop snapshot early exit\n");
9327                                 err = -EAGAIN;
9328                                 goto out_free;
9329                         }
9330
9331                         trans = btrfs_start_transaction(tree_root, 0);
9332                         if (IS_ERR(trans)) {
9333                                 err = PTR_ERR(trans);
9334                                 goto out_free;
9335                         }
9336                         if (block_rsv)
9337                                 trans->block_rsv = block_rsv;
9338                 }
9339         }
9340         btrfs_release_path(path);
9341         if (err)
9342                 goto out_end_trans;
9343
9344         ret = btrfs_del_root(trans, tree_root, &root->root_key);
9345         if (ret) {
9346                 btrfs_abort_transaction(trans, ret);
9347                 goto out_end_trans;
9348         }
9349
9350         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
9351                 ret = btrfs_find_root(tree_root, &root->root_key, path,
9352                                       NULL, NULL);
9353                 if (ret < 0) {
9354                         btrfs_abort_transaction(trans, ret);
9355                         err = ret;
9356                         goto out_end_trans;
9357                 } else if (ret > 0) {
9358                         /* if we fail to delete the orphan item this time
9359                          * around, it'll get picked up the next time.
9360                          *
9361                          * The most common failure here is just -ENOENT.
9362                          */
9363                         btrfs_del_orphan_item(trans, tree_root,
9364                                               root->root_key.objectid);
9365                 }
9366         }
9367
9368         if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
9369                 btrfs_add_dropped_root(trans, root);
9370         } else {
9371                 free_extent_buffer(root->node);
9372                 free_extent_buffer(root->commit_root);
9373                 btrfs_put_fs_root(root);
9374         }
9375         root_dropped = true;
9376 out_end_trans:
9377         btrfs_end_transaction_throttle(trans, tree_root);
9378 out_free:
9379         kfree(wc);
9380         btrfs_free_path(path);
9381 out:
9382         /*
9383          * So if we need to stop dropping the snapshot for whatever reason we
9384          * need to make sure to add it back to the dead root list so that we
9385          * keep trying to do the work later.  This also cleans up roots we
9386          * don't have in the radix (like when we recover after a power fail
9387          * or unmount) so we don't leak memory.
9388          */
9389         if (!for_reloc && root_dropped == false)
9390                 btrfs_add_dead_root(root);
9391         if (err && err != -EAGAIN)
9392                 btrfs_handle_fs_error(root->fs_info, err, NULL);
9393         return err;
9394 }
9395
9396 /*
9397  * drop subtree rooted at tree block 'node'.
9398  *
9399  * NOTE: this function will unlock and release tree block 'node'.
9400  * it is only used by the relocation code.
9401  */
9402 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
9403                         struct btrfs_root *root,
9404                         struct extent_buffer *node,
9405                         struct extent_buffer *parent)
9406 {
9407         struct btrfs_path *path;
9408         struct walk_control *wc;
9409         int level;
9410         int parent_level;
9411         int ret = 0;
9412         int wret;
9413
9414         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
9415
9416         path = btrfs_alloc_path();
9417         if (!path)
9418                 return -ENOMEM;
9419
9420         wc = kzalloc(sizeof(*wc), GFP_NOFS);
9421         if (!wc) {
9422                 btrfs_free_path(path);
9423                 return -ENOMEM;
9424         }
9425
9426         btrfs_assert_tree_locked(parent);
9427         parent_level = btrfs_header_level(parent);
9428         extent_buffer_get(parent);
9429         path->nodes[parent_level] = parent;
9430         path->slots[parent_level] = btrfs_header_nritems(parent);
9431
9432         btrfs_assert_tree_locked(node);
9433         level = btrfs_header_level(node);
9434         path->nodes[level] = node;
9435         path->slots[level] = 0;
9436         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9437
9438         wc->refs[parent_level] = 1;
9439         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
9440         wc->level = level;
9441         wc->shared_level = -1;
9442         wc->stage = DROP_REFERENCE;
9443         wc->update_ref = 0;
9444         wc->keep_locks = 1;
9445         wc->for_reloc = 1;
9446         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
9447
9448         while (1) {
9449                 wret = walk_down_tree(trans, root, path, wc);
9450                 if (wret < 0) {
9451                         ret = wret;
9452                         break;
9453                 }
9454
9455                 wret = walk_up_tree(trans, root, path, wc, parent_level);
9456                 if (wret < 0)
9457                         ret = wret;
9458                 if (wret != 0)
9459                         break;
9460         }
9461
9462         kfree(wc);
9463         btrfs_free_path(path);
9464         return ret;
9465 }
9466
9467 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
9468 {
9469         u64 num_devices;
9470         u64 stripped;
9471
9472         /*
9473          * if restripe for this chunk_type is on, pick the target profile
9474          * and return; otherwise do the usual balance
9475          */
9476         stripped = get_restripe_target(root->fs_info, flags);
9477         if (stripped)
9478                 return extended_to_chunk(stripped);
9479
9480         num_devices = root->fs_info->fs_devices->rw_devices;
9481
9482         stripped = BTRFS_BLOCK_GROUP_RAID0 |
9483                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
9484                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
9485
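        /*
         * e.g. on a filesystem reduced to a single rw device this turns
         * RAID1/RAID10 into DUP and RAID0 into single chunks; with more
         * devices available, DUP is switched back to RAID1.
         */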
9486         if (num_devices == 1) {
9487                 stripped |= BTRFS_BLOCK_GROUP_DUP;
9488                 stripped = flags & ~stripped;
9489
9490                 /* turn raid0 into single device chunks */
9491                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
9492                         return stripped;
9493
9494                 /* turn mirroring into duplication */
9495                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
9496                              BTRFS_BLOCK_GROUP_RAID10))
9497                         return stripped | BTRFS_BLOCK_GROUP_DUP;
9498         } else {
9499                 /* they already had raid on here, just return */
9500                 if (flags & stripped)
9501                         return flags;
9502
9503                 stripped |= BTRFS_BLOCK_GROUP_DUP;
9504                 stripped = flags & ~stripped;
9505
9506                 /* switch duplicated blocks with raid1 */
9507                 if (flags & BTRFS_BLOCK_GROUP_DUP)
9508                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
9509
9510                 /* this is drive concat, leave it alone */
9511         }
9512
9513         return flags;
9514 }
9515
9516 static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
9517 {
9518         struct btrfs_space_info *sinfo = cache->space_info;
9519         u64 num_bytes;
9520         u64 min_allocable_bytes;
9521         int ret = -ENOSPC;
9522
9523         /*
9524          * We need some metadata space and system metadata space for
9525          * allocating chunks in some corner cases, so keep a minimum of
9526          * allocatable space around unless we're forced to go read-only.
9527          */
9528         if ((sinfo->flags &
9529              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
9530             !force)
9531                 min_allocable_bytes = SZ_1M;
9532         else
9533                 min_allocable_bytes = 0;
9534
9535         spin_lock(&sinfo->lock);
9536         spin_lock(&cache->lock);
9537
9538         if (cache->ro) {
9539                 cache->ro++;
9540                 ret = 0;
9541                 goto out;
9542         }
9543
9544         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
9545                     cache->bytes_super - btrfs_block_group_used(&cache->item);
9546
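        /*
         * num_bytes is this group's remaining free space, which becomes
         * unusable once the group is read-only.  e.g. marking a 1GiB
         * metadata block group with 100MiB used read-only moves ~900MiB
         * into bytes_readonly, and only succeeds if the space_info can
         * still cover all its commitments plus the 1MiB reserve.
         */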
9547         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
9548             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
9549             min_allocable_bytes <= sinfo->total_bytes) {
9550                 sinfo->bytes_readonly += num_bytes;
9551                 cache->ro++;
9552                 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
9553                 ret = 0;
9554         }
9555 out:
9556         spin_unlock(&cache->lock);
9557         spin_unlock(&sinfo->lock);
9558         return ret;
9559 }
9560
9561 int btrfs_inc_block_group_ro(struct btrfs_root *root,
9562                              struct btrfs_block_group_cache *cache)
9563
9564 {
9565         struct btrfs_trans_handle *trans;
9566         u64 alloc_flags;
9567         int ret;
9568
9569 again:
9570         trans = btrfs_join_transaction(root);
9571         if (IS_ERR(trans))
9572                 return PTR_ERR(trans);
9573
9574         /*
9575          * we're not allowed to set block groups read-only after the dirty
9576          * block group cache has started writing.  If it has already started,
9577          * back off and let this transaction commit.
9578          */
9579         mutex_lock(&root->fs_info->ro_block_group_mutex);
9580         if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
9581                 u64 transid = trans->transid;
9582
9583                 mutex_unlock(&root->fs_info->ro_block_group_mutex);
9584                 btrfs_end_transaction(trans, root);
9585
9586                 ret = btrfs_wait_for_commit(root, transid);
9587                 if (ret)
9588                         return ret;
9589                 goto again;
9590         }
9591
9592         /*
9593          * if we are changing raid levels, try to allocate a corresponding
9594          * block group with the new raid level.
9595          */
9596         alloc_flags = update_block_group_flags(root, cache->flags);
9597         if (alloc_flags != cache->flags) {
9598                 ret = do_chunk_alloc(trans, root, alloc_flags,
9599                                      CHUNK_ALLOC_FORCE);
9600                 /*
9601                  * ENOSPC is allowed here, we may have enough space
9602                  * already allocated at the new raid level to
9603                  * carry on
9604                  */
9605                 if (ret == -ENOSPC)
9606                         ret = 0;
9607                 if (ret < 0)
9608                         goto out;
9609         }
9610
9611         ret = inc_block_group_ro(cache, 0);
9612         if (!ret)
9613                 goto out;
9614         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
9615         ret = do_chunk_alloc(trans, root, alloc_flags,
9616                              CHUNK_ALLOC_FORCE);
9617         if (ret < 0)
9618                 goto out;
9619         ret = inc_block_group_ro(cache, 0);
9620 out:
9621         if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
9622                 alloc_flags = update_block_group_flags(root, cache->flags);
9623                 lock_chunks(root->fs_info->chunk_root);
9624                 check_system_chunk(trans, root, alloc_flags);
9625                 unlock_chunks(root->fs_info->chunk_root);
9626         }
9627         mutex_unlock(&root->fs_info->ro_block_group_mutex);
9628
9629         btrfs_end_transaction(trans, root);
9630         return ret;
9631 }
9632
9633 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
9634                             struct btrfs_root *root, u64 type)
9635 {
9636         u64 alloc_flags = get_alloc_profile(root, type);
9637         return do_chunk_alloc(trans, root, alloc_flags,
9638                               CHUNK_ALLOC_FORCE);
9639 }
9640
9641 /*
9642  * helper to account the unused space of all the read-only block groups in
9643  * the space_info. takes mirrors into account.
9644  */
9645 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
9646 {
9647         struct btrfs_block_group_cache *block_group;
9648         u64 free_bytes = 0;
9649         int factor;
9650
9651         /* It's df, we don't care if it's racy */
9652         if (list_empty(&sinfo->ro_bgs))
9653                 return 0;
9654
9655         spin_lock(&sinfo->lock);
9656         list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
9657                 spin_lock(&block_group->lock);
9658
9659                 if (!block_group->ro) {
9660                         spin_unlock(&block_group->lock);
9661                         continue;
9662                 }
9663
9664                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
9665                                           BTRFS_BLOCK_GROUP_RAID10 |
9666                                           BTRFS_BLOCK_GROUP_DUP))
9667                         factor = 2;
9668                 else
9669                         factor = 1;
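                /*
                 * e.g. a 1GiB RAID1 block group with 300MiB used
                 * contributes (1GiB - 300MiB) * 2, roughly 1.4GiB of raw
                 * on-disk free space.
                 */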
9670
9671                 free_bytes += (block_group->key.offset -
9672                                btrfs_block_group_used(&block_group->item)) *
9673                                factor;
9674
9675                 spin_unlock(&block_group->lock);
9676         }
9677         spin_unlock(&sinfo->lock);
9678
9679         return free_bytes;
9680 }
9681
9682 void btrfs_dec_block_group_ro(struct btrfs_root *root,
9683                               struct btrfs_block_group_cache *cache)
9684 {
9685         struct btrfs_space_info *sinfo = cache->space_info;
9686         u64 num_bytes;
9687
9688         BUG_ON(!cache->ro);
9689
9690         spin_lock(&sinfo->lock);
9691         spin_lock(&cache->lock);
9692         if (!--cache->ro) {
9693                 num_bytes = cache->key.offset - cache->reserved -
9694                             cache->pinned - cache->bytes_super -
9695                             btrfs_block_group_used(&cache->item);
9696                 sinfo->bytes_readonly -= num_bytes;
9697                 list_del_init(&cache->ro_list);
9698         }
9699         spin_unlock(&cache->lock);
9700         spin_unlock(&sinfo->lock);
9701 }
9702
9703 /*
9704  * checks to see if it's even possible to relocate this block group.
9705  *
9706  * @return - -1 if it's not a good idea to relocate this block group, 0 if
9707  * it's ok to go ahead and try.
9708  */
9709 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
9710 {
9711         struct btrfs_block_group_cache *block_group;
9712         struct btrfs_space_info *space_info;
9713         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
9714         struct btrfs_device *device;
9715         struct btrfs_trans_handle *trans;
9716         u64 min_free;
9717         u64 dev_min = 1;
9718         u64 dev_nr = 0;
9719         u64 target;
9720         int debug;
9721         int index;
9722         int full = 0;
9723         int ret = 0;
9724
9725         debug = btrfs_test_opt(root->fs_info, ENOSPC_DEBUG);
9726
9727         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
9728
9729         /* odd, couldn't find the block group, leave it alone */
9730         if (!block_group) {
9731                 if (debug)
9732                         btrfs_warn(root->fs_info,
9733                                    "can't find block group for bytenr %llu",
9734                                    bytenr);
9735                 return -1;
9736         }
9737
9738         min_free = btrfs_block_group_used(&block_group->item);
9739
9740         /* no bytes used, we're good */
9741         if (!min_free)
9742                 goto out;
9743
9744         space_info = block_group->space_info;
9745         spin_lock(&space_info->lock);
9746
9747         full = space_info->full;
9748
9749         /*
9750          * if this is the last block group we have in this space, we can't
9751          * relocate it unless we're able to allocate a new chunk below.
9752          *
9753          * Otherwise, we need to make sure we have room in the space to handle
9754          * all of the extents from this block group.  If we can, we're good
9755          */
9756         if ((space_info->total_bytes != block_group->key.offset) &&
9757             (space_info->bytes_used + space_info->bytes_reserved +
9758              space_info->bytes_pinned + space_info->bytes_readonly +
9759              min_free < space_info->total_bytes)) {
9760                 spin_unlock(&space_info->lock);
9761                 goto out;
9762         }
9763         spin_unlock(&space_info->lock);
9764
9765         /*
9766          * ok we don't have enough space, but maybe we have free space on our
9767          * devices to allocate new chunks for relocation, so loop through our
9768          * alloc devices and guess if we have enough space.  if this block
9769          * group is going to be restriped, run checks against the target
9770          * profile instead of the current one.
9771          */
9772         ret = -1;
9773
9774         /*
9775          * index:
9776          *      0: raid10
9777          *      1: raid1
9778          *      2: dup
9779          *      3: raid0
9780          *      4: single
9781          */
9782         target = get_restripe_target(root->fs_info, block_group->flags);
9783         if (target) {
9784                 index = __get_raid_index(extended_to_chunk(target));
9785         } else {
9786                 /*
9787                  * this is just a balance, so if we were marked as full
9788                  * we know there is no space for a new chunk
9789                  */
9790                 if (full) {
9791                         if (debug)
9792                                 btrfs_warn(root->fs_info,
9793                                         "no space to alloc new chunk for block group %llu",
9794                                         block_group->key.objectid);
9795                         goto out;
9796                 }
9797
9798                 index = get_block_group_index(block_group);
9799         }
9800
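        /*
         * e.g. relocating a RAID10 block group with 1GiB used requires at
         * least four devices, each with a ~512MiB free extent, while a DUP
         * group with 1GiB used needs one device with a 2GiB free extent.
         */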
9801         if (index == BTRFS_RAID_RAID10) {
9802                 dev_min = 4;
9803                 /* Divide by 2 */
9804                 min_free >>= 1;
9805         } else if (index == BTRFS_RAID_RAID1) {
9806                 dev_min = 2;
9807         } else if (index == BTRFS_RAID_DUP) {
9808                 /* Multiply by 2 */
9809                 min_free <<= 1;
9810         } else if (index == BTRFS_RAID_RAID0) {
9811                 dev_min = fs_devices->rw_devices;
9812                 min_free = div64_u64(min_free, dev_min);
9813         }
9814
9815         /* We need to do this so that we can look at pending chunks */
9816         trans = btrfs_join_transaction(root);
9817         if (IS_ERR(trans)) {
9818                 ret = PTR_ERR(trans);
9819                 goto out;
9820         }
9821
9822         mutex_lock(&root->fs_info->chunk_mutex);
9823         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
9824                 u64 dev_offset;
9825
9826                 /*
9827                  * check to make sure we can actually find a chunk with enough
9828                  * space to fit our block group in.
9829                  */
9830                 if (device->total_bytes > device->bytes_used + min_free &&
9831                     !device->is_tgtdev_for_dev_replace) {
9832                         ret = find_free_dev_extent(trans, device, min_free,
9833                                                    &dev_offset, NULL);
9834                         if (!ret)
9835                                 dev_nr++;
9836
9837                         if (dev_nr >= dev_min)
9838                                 break;
9839
9840                         ret = -1;
9841                 }
9842         }
9843         if (debug && ret == -1)
9844                 btrfs_warn(root->fs_info,
9845                         "no space to allocate a new chunk for block group %llu",
9846                         block_group->key.objectid);
9847         mutex_unlock(&root->fs_info->chunk_mutex);
9848         btrfs_end_transaction(trans, root);
9849 out:
9850         btrfs_put_block_group(block_group);
9851         return ret;
9852 }
9853
9854 static int find_first_block_group(struct btrfs_root *root,
9855                 struct btrfs_path *path, struct btrfs_key *key)
9856 {
9857         int ret = 0;
9858         struct btrfs_key found_key;
9859         struct extent_buffer *leaf;
9860         int slot;
9861
9862         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
9863         if (ret < 0)
9864                 goto out;
9865
9866         while (1) {
9867                 slot = path->slots[0];
9868                 leaf = path->nodes[0];
9869                 if (slot >= btrfs_header_nritems(leaf)) {
9870                         ret = btrfs_next_leaf(root, path);
9871                         if (ret == 0)
9872                                 continue;
9873                         if (ret < 0)
9874                                 goto out;
9875                         break;
9876                 }
9877                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
9878
9879                 if (found_key.objectid >= key->objectid &&
9880                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
9881                         struct extent_map_tree *em_tree;
9882                         struct extent_map *em;
9883
9884                         em_tree = &root->fs_info->mapping_tree.map_tree;
9885                         read_lock(&em_tree->lock);
9886                         em = lookup_extent_mapping(em_tree, found_key.objectid,
9887                                                    found_key.offset);
9888                         read_unlock(&em_tree->lock);
9889                         if (!em) {
9890                                 btrfs_err(root->fs_info,
9891                         "logical %llu len %llu found bg but no related chunk",
9892                                           found_key.objectid, found_key.offset);
9893                                 ret = -ENOENT;
9894                         } else {
9895                                 ret = 0;
9896                         }
9897                         free_extent_map(em);
9898                         goto out;
9899                 }
9900                 path->slots[0]++;
9901         }
9902 out:
9903         return ret;
9904 }
9905
9906 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
9907 {
9908         struct btrfs_block_group_cache *block_group;
9909         u64 last = 0;
9910
9911         while (1) {
9912                 struct inode *inode;
9913
9914                 block_group = btrfs_lookup_first_block_group(info, last);
9915                 while (block_group) {
9916                         spin_lock(&block_group->lock);
9917                         if (block_group->iref)
9918                                 break;
9919                         spin_unlock(&block_group->lock);
9920                         block_group = next_block_group(info->tree_root,
9921                                                        block_group);
9922                 }
9923                 if (!block_group) {
9924                         if (last == 0)
9925                                 break;
9926                         last = 0;
9927                         continue;
9928                 }
9929
9930                 inode = block_group->inode;
9931                 block_group->iref = 0;
9932                 block_group->inode = NULL;
9933                 spin_unlock(&block_group->lock);
9934                 ASSERT(block_group->io_ctl.inode == NULL);
9935                 iput(inode);
9936                 last = block_group->key.objectid + block_group->key.offset;
9937                 btrfs_put_block_group(block_group);
9938         }
9939 }
9940
9941 int btrfs_free_block_groups(struct btrfs_fs_info *info)
9942 {
9943         struct btrfs_block_group_cache *block_group;
9944         struct btrfs_space_info *space_info;
9945         struct btrfs_caching_control *caching_ctl;
9946         struct rb_node *n;
9947
9948         down_write(&info->commit_root_sem);
9949         while (!list_empty(&info->caching_block_groups)) {
9950                 caching_ctl = list_entry(info->caching_block_groups.next,
9951                                          struct btrfs_caching_control, list);
9952                 list_del(&caching_ctl->list);
9953                 put_caching_control(caching_ctl);
9954         }
9955         up_write(&info->commit_root_sem);
9956
9957         spin_lock(&info->unused_bgs_lock);
9958         while (!list_empty(&info->unused_bgs)) {
9959                 block_group = list_first_entry(&info->unused_bgs,
9960                                                struct btrfs_block_group_cache,
9961                                                bg_list);
9962                 list_del_init(&block_group->bg_list);
9963                 btrfs_put_block_group(block_group);
9964         }
9965         spin_unlock(&info->unused_bgs_lock);
9966
9967         spin_lock(&info->block_group_cache_lock);
9968         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
9969                 block_group = rb_entry(n, struct btrfs_block_group_cache,
9970                                        cache_node);
9971                 rb_erase(&block_group->cache_node,
9972                          &info->block_group_cache_tree);
9973                 RB_CLEAR_NODE(&block_group->cache_node);
9974                 spin_unlock(&info->block_group_cache_lock);
9975
9976                 down_write(&block_group->space_info->groups_sem);
9977                 list_del(&block_group->list);
9978                 up_write(&block_group->space_info->groups_sem);
9979
9980                 if (block_group->cached == BTRFS_CACHE_STARTED)
9981                         wait_block_group_cache_done(block_group);
9982
9983                 /*
9984                  * We haven't cached this block group, which means we could
9985                  * possibly have excluded extents on this block group.
9986                  */
9987                 if (block_group->cached == BTRFS_CACHE_NO ||
9988                     block_group->cached == BTRFS_CACHE_ERROR)
9989                         free_excluded_extents(info->extent_root, block_group);
9990
9991                 btrfs_remove_free_space_cache(block_group);
9992                 ASSERT(list_empty(&block_group->dirty_list));
9993                 ASSERT(list_empty(&block_group->io_list));
9994                 ASSERT(list_empty(&block_group->bg_list));
9995                 ASSERT(atomic_read(&block_group->count) == 1);
9996                 btrfs_put_block_group(block_group);
9997
9998                 spin_lock(&info->block_group_cache_lock);
9999         }
10000         spin_unlock(&info->block_group_cache_lock);
10001
10002         /* now that all the block groups are freed, go through and
10003          * free all the space_info structs.  This is only called during
10004          * the final stages of unmount, and so we know nobody is
10005          * using them.  We call synchronize_rcu() once before we start,
10006          * just to be on the safe side.
10007          */
10008         synchronize_rcu();
10009
10010         release_global_block_rsv(info);
10011
10012         while (!list_empty(&info->space_info)) {
10013                 int i;
10014
10015                 space_info = list_entry(info->space_info.next,
10016                                         struct btrfs_space_info,
10017                                         list);
10018
10019                 /*
10020                  * Do not hide this behind enospc_debug, this is actually
10021                  * important and indicates a real bug if this happens.
10022                  */
10023                 if (WARN_ON(space_info->bytes_pinned > 0 ||
10024                             space_info->bytes_reserved > 0 ||
10025                             space_info->bytes_may_use > 0))
10026                         dump_space_info(space_info, 0, 0);
10027                 list_del(&space_info->list);
10028                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
10029                         struct kobject *kobj;
10030                         kobj = space_info->block_group_kobjs[i];
10031                         space_info->block_group_kobjs[i] = NULL;
10032                         if (kobj) {
10033                                 kobject_del(kobj);
10034                                 kobject_put(kobj);
10035                         }
10036                 }
10037                 kobject_del(&space_info->kobj);
10038                 kobject_put(&space_info->kobj);
10039         }
10040         return 0;
10041 }
10042
10043 static void __link_block_group(struct btrfs_space_info *space_info,
10044                                struct btrfs_block_group_cache *cache)
10045 {
10046         int index = get_block_group_index(cache);
10047         bool first = false;
10048
10049         down_write(&space_info->groups_sem);
10050         if (list_empty(&space_info->block_groups[index]))
10051                 first = true;
10052         list_add_tail(&cache->list, &space_info->block_groups[index]);
10053         up_write(&space_info->groups_sem);
10054
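        /*
         * The first block group of each raid type in a space_info also
         * gets a sysfs directory, e.g. something like
         * /sys/fs/btrfs/<fsid>/allocation/data/raid1.
         */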
10055         if (first) {
10056                 struct raid_kobject *rkobj;
10057                 int ret;
10058
10059                 rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
10060                 if (!rkobj)
10061                         goto out_err;
10062                 rkobj->raid_type = index;
10063                 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
10064                 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
10065                                   "%s", get_raid_name(index));
10066                 if (ret) {
10067                         kobject_put(&rkobj->kobj);
10068                         goto out_err;
10069                 }
10070                 space_info->block_group_kobjs[index] = &rkobj->kobj;
10071         }
10072
10073         return;
10074 out_err:
10075         pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
10076 }
10077
10078 static struct btrfs_block_group_cache *
10079 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
10080 {
10081         struct btrfs_block_group_cache *cache;
10082
10083         cache = kzalloc(sizeof(*cache), GFP_NOFS);
10084         if (!cache)
10085                 return NULL;
10086
10087         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
10088                                         GFP_NOFS);
10089         if (!cache->free_space_ctl) {
10090                 kfree(cache);
10091                 return NULL;
10092         }
10093
10094         cache->key.objectid = start;
10095         cache->key.offset = size;
10096         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
10097
10098         cache->sectorsize = root->sectorsize;
10099         cache->fs_info = root->fs_info;
10100         cache->full_stripe_len = btrfs_full_stripe_len(root,
10101                                                &root->fs_info->mapping_tree,
10102                                                start);
10103         set_free_space_tree_thresholds(cache);
10104
10105         atomic_set(&cache->count, 1);
10106         spin_lock_init(&cache->lock);
10107         init_rwsem(&cache->data_rwsem);
10108         INIT_LIST_HEAD(&cache->list);
10109         INIT_LIST_HEAD(&cache->cluster_list);
10110         INIT_LIST_HEAD(&cache->bg_list);
10111         INIT_LIST_HEAD(&cache->ro_list);
10112         INIT_LIST_HEAD(&cache->dirty_list);
10113         INIT_LIST_HEAD(&cache->io_list);
10114         btrfs_init_free_space_ctl(cache);
10115         atomic_set(&cache->trimming, 0);
10116         mutex_init(&cache->free_space_lock);
10117
10118         return cache;
10119 }
10120
10121 int btrfs_read_block_groups(struct btrfs_root *root)
10122 {
10123         struct btrfs_path *path;
10124         int ret;
10125         struct btrfs_block_group_cache *cache;
10126         struct btrfs_fs_info *info = root->fs_info;
10127         struct btrfs_space_info *space_info;
10128         struct btrfs_key key;
10129         struct btrfs_key found_key;
10130         struct extent_buffer *leaf;
10131         int need_clear = 0;
10132         u64 cache_gen;
10133
10134         root = info->extent_root;
10135         key.objectid = 0;
10136         key.offset = 0;
10137         key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
10138         path = btrfs_alloc_path();
10139         if (!path)
10140                 return -ENOMEM;
10141         path->reada = READA_FORWARD;
10142
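        /*
         * The free space cache is only trusted when its generation matches
         * the superblock generation; otherwise (or with -o clear_cache)
         * each block group below is flagged to have its cache rebuilt.
         */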
10143         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
10144         if (btrfs_test_opt(root->fs_info, SPACE_CACHE) &&
10145             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
10146                 need_clear = 1;
10147         if (btrfs_test_opt(root->fs_info, CLEAR_CACHE))
10148                 need_clear = 1;
10149
10150         while (1) {
10151                 ret = find_first_block_group(root, path, &key);
10152                 if (ret > 0)
10153                         break;
10154                 if (ret != 0)
10155                         goto error;
10156
10157                 leaf = path->nodes[0];
10158                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
10159
10160                 cache = btrfs_create_block_group_cache(root, found_key.objectid,
10161                                                        found_key.offset);
10162                 if (!cache) {
10163                         ret = -ENOMEM;
10164                         goto error;
10165                 }
10166
10167                 if (need_clear) {
10168                         /*
10169                          * When we mount with old space cache, we need to
10170                          * set BTRFS_DC_CLEAR and set the dirty flag.
10171                          *
10172                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
10173                          *    truncate the old free space cache inode and
10174                          *    set up a new one.
10175                          * b) Setting the 'dirty' flag makes sure that we flush
10176                          *    the new space cache info onto disk.
10177                          */
10178                         if (btrfs_test_opt(root->fs_info, SPACE_CACHE))
10179                                 cache->disk_cache_state = BTRFS_DC_CLEAR;
10180                 }
10181
10182                 read_extent_buffer(leaf, &cache->item,
10183                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
10184                                    sizeof(cache->item));
10185                 cache->flags = btrfs_block_group_flags(&cache->item);
10186
10187                 key.objectid = found_key.objectid + found_key.offset;
10188                 btrfs_release_path(path);
10189
10190                 /*
10191                  * We need to exclude the super stripes now so that the space
10192                  * info has super bytes accounted for, otherwise we'll think
10193                  * we have more space than we actually do.
10194                  */
10195                 ret = exclude_super_stripes(root, cache);
10196                 if (ret) {
10197                         /*
10198                          * We may have excluded something, so call this just in
10199                          * case.
10200                          */
10201                         free_excluded_extents(root, cache);
10202                         btrfs_put_block_group(cache);
10203                         goto error;
10204                 }
10205
10206                 /*
10207                  * check for two cases: either we are full, and therefore
10208                  * don't need to bother with the caching work since we won't
10209                  * find any space, or we are empty, and we can just add all
10210                  * the space in and be done with it.  This saves us _a lot_ of
10211                  * time, particularly in the full case.
10212                  */
10213                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
10214                         cache->last_byte_to_unpin = (u64)-1;
10215                         cache->cached = BTRFS_CACHE_FINISHED;
10216                         free_excluded_extents(root, cache);
10217                 } else if (btrfs_block_group_used(&cache->item) == 0) {
10218                         cache->last_byte_to_unpin = (u64)-1;
10219                         cache->cached = BTRFS_CACHE_FINISHED;
10220                         add_new_free_space(cache, root->fs_info,
10221                                            found_key.objectid,
10222                                            found_key.objectid +
10223                                            found_key.offset);
10224                         free_excluded_extents(root, cache);
10225                 }
10226
10227                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
10228                 if (ret) {
10229                         btrfs_remove_free_space_cache(cache);
10230                         btrfs_put_block_group(cache);
10231                         goto error;
10232                 }
10233
10234                 trace_btrfs_add_block_group(root->fs_info, cache, 0);
10235                 ret = update_space_info(info, cache->flags, found_key.offset,
10236                                         btrfs_block_group_used(&cache->item),
10237                                         cache->bytes_super, &space_info);
10238                 if (ret) {
10239                         btrfs_remove_free_space_cache(cache);
10240                         spin_lock(&info->block_group_cache_lock);
10241                         rb_erase(&cache->cache_node,
10242                                  &info->block_group_cache_tree);
10243                         RB_CLEAR_NODE(&cache->cache_node);
10244                         spin_unlock(&info->block_group_cache_lock);
10245                         btrfs_put_block_group(cache);
10246                         goto error;
10247                 }
10248
10249                 cache->space_info = space_info;
10250
10251                 __link_block_group(space_info, cache);
10252
10253                 set_avail_alloc_bits(root->fs_info, cache->flags);
10254                 if (btrfs_chunk_readonly(root, cache->key.objectid)) {
10255                         inc_block_group_ro(cache, 1);
10256                 } else if (btrfs_block_group_used(&cache->item) == 0) {
10257                         spin_lock(&info->unused_bgs_lock);
10258                         /* Should always be true but just in case. */
10259                         if (list_empty(&cache->bg_list)) {
10260                                 btrfs_get_block_group(cache);
10261                                 list_add_tail(&cache->bg_list,
10262                                               &info->unused_bgs);
10263                         }
10264                         spin_unlock(&info->unused_bgs_lock);
10265                 }
10266         }
10267
10268         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
10269                 if (!(get_alloc_profile(root, space_info->flags) &
10270                       (BTRFS_BLOCK_GROUP_RAID10 |
10271                        BTRFS_BLOCK_GROUP_RAID1 |
10272                        BTRFS_BLOCK_GROUP_RAID5 |
10273                        BTRFS_BLOCK_GROUP_RAID6 |
10274                        BTRFS_BLOCK_GROUP_DUP)))
10275                         continue;
10276                 /*
10277                  * avoid allocating from un-mirrored block group if there are
10278                  * mirrored block groups.
10279                  */
10280                 list_for_each_entry(cache,
10281                                 &space_info->block_groups[BTRFS_RAID_RAID0],
10282                                 list)
10283                         inc_block_group_ro(cache, 1);
10284                 list_for_each_entry(cache,
10285                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
10286                                 list)
10287                         inc_block_group_ro(cache, 1);
10288         }
10289
10290         init_global_block_rsv(info);
10291         ret = 0;
10292 error:
10293         btrfs_free_path(path);
10294         return ret;
10295 }
10296
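/*
 * Insert the block group items for all block groups created in the current
 * transaction (trans->new_bgs) and finish their chunk allocation.  Any
 * failure aborts the transaction, but the list is always drained.
 */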
10297 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
10298                                        struct btrfs_root *root)
10299 {
10300         struct btrfs_block_group_cache *block_group, *tmp;
10301         struct btrfs_root *extent_root = root->fs_info->extent_root;
10302         struct btrfs_block_group_item item;
10303         struct btrfs_key key;
10304         int ret = 0;
10305         bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
10306
10307         trans->can_flush_pending_bgs = false;
10308         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
10309                 if (ret)
10310                         goto next;
10311
10312                 spin_lock(&block_group->lock);
10313                 memcpy(&item, &block_group->item, sizeof(item));
10314                 memcpy(&key, &block_group->key, sizeof(key));
10315                 spin_unlock(&block_group->lock);
10316
10317                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
10318                                         sizeof(item));
10319                 if (ret)
10320                         btrfs_abort_transaction(trans, ret);
10321                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
10322                                                key.objectid, key.offset);
10323                 if (ret)
10324                         btrfs_abort_transaction(trans, ret);
10325                 add_block_group_free_space(trans, root->fs_info, block_group);
10326                 /* already aborted the transaction if it failed. */
10327 next:
10328                 list_del_init(&block_group->bg_list);
10329         }
10330         trans->can_flush_pending_bgs = can_flush_pending_bgs;
10331 }
10332
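/*
 * Create the in-memory block group for a freshly allocated chunk and link
 * it into its space_info and the block group rbtree.  The on-disk item is
 * not written here; the group is queued on trans->new_bgs and inserted
 * later by btrfs_create_pending_block_groups().
 */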
10333 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
10334                            struct btrfs_root *root, u64 bytes_used,
10335                            u64 type, u64 chunk_objectid, u64 chunk_offset,
10336                            u64 size)
10337 {
10338         int ret;
10339         struct btrfs_root *extent_root;
10340         struct btrfs_block_group_cache *cache;
10341         extent_root = root->fs_info->extent_root;
10342
10343         btrfs_set_log_full_commit(root->fs_info, trans);
10344
10345         cache = btrfs_create_block_group_cache(root, chunk_offset, size);
10346         if (!cache)
10347                 return -ENOMEM;
10348
10349         btrfs_set_block_group_used(&cache->item, bytes_used);
10350         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
10351         btrfs_set_block_group_flags(&cache->item, type);
10352
10353         cache->flags = type;
10354         cache->last_byte_to_unpin = (u64)-1;
10355         cache->cached = BTRFS_CACHE_FINISHED;
10356         cache->needs_free_space = 1;
10357         ret = exclude_super_stripes(root, cache);
10358         if (ret) {
10359                 /*
10360                  * We may have excluded something, so call this just in
10361                  * case.
10362                  */
10363                 free_excluded_extents(root, cache);
10364                 btrfs_put_block_group(cache);
10365                 return ret;
10366         }
10367
10368         add_new_free_space(cache, root->fs_info, chunk_offset,
10369                            chunk_offset + size);
10370
10371         free_excluded_extents(root, cache);
10372
10373 #ifdef CONFIG_BTRFS_DEBUG
10374         if (btrfs_should_fragment_free_space(root, cache)) {
10375                 u64 new_bytes_used = size - bytes_used;
10376
10377                 bytes_used += new_bytes_used >> 1;
10378                 fragment_free_space(root, cache);
10379         }
10380 #endif
10381         /*
10382          * Call to ensure the corresponding space_info object is created and
10383          * assigned to our block group, but don't update its counters just yet.
10384          * We want our bg to be added to the rbtree with its ->space_info set.
10385          */
10386         ret = update_space_info(root->fs_info, cache->flags, 0, 0, 0,
10387                                 &cache->space_info);
10388         if (ret) {
10389                 btrfs_remove_free_space_cache(cache);
10390                 btrfs_put_block_group(cache);
10391                 return ret;
10392         }
10393
10394         ret = btrfs_add_block_group_cache(root->fs_info, cache);
10395         if (ret) {
10396                 btrfs_remove_free_space_cache(cache);
10397                 btrfs_put_block_group(cache);
10398                 return ret;
10399         }
10400
10401         /*
10402          * Now that our block group has its ->space_info set and is inserted in
10403          * the rbtree, update the space info's counters.
10404          */
10405         trace_btrfs_add_block_group(root->fs_info, cache, 1);
10406         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
10407                                 cache->bytes_super, &cache->space_info);
10408         if (ret) {
10409                 btrfs_remove_free_space_cache(cache);
10410                 spin_lock(&root->fs_info->block_group_cache_lock);
10411                 rb_erase(&cache->cache_node,
10412                          &root->fs_info->block_group_cache_tree);
10413                 RB_CLEAR_NODE(&cache->cache_node);
10414                 spin_unlock(&root->fs_info->block_group_cache_lock);
10415                 btrfs_put_block_group(cache);
10416                 return ret;
10417         }
10418         update_global_block_rsv(root->fs_info);
10419
10420         __link_block_group(cache->space_info, cache);
10421
10422         list_add_tail(&cache->bg_list, &trans->new_bgs);
10423
10424         set_avail_alloc_bits(extent_root->fs_info, type);
10425         return 0;
10426 }
10427
10428 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
10429 {
10430         u64 extra_flags = chunk_to_extended(flags) &
10431                                 BTRFS_EXTENDED_PROFILE_MASK;
10432
10433         write_seqlock(&fs_info->profiles_lock);
10434         if (flags & BTRFS_BLOCK_GROUP_DATA)
10435                 fs_info->avail_data_alloc_bits &= ~extra_flags;
10436         if (flags & BTRFS_BLOCK_GROUP_METADATA)
10437                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
10438         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
10439                 fs_info->avail_system_alloc_bits &= ~extra_flags;
10440         write_sequnlock(&fs_info->profiles_lock);
10441 }
10442
10443 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
10444                              struct btrfs_root *root, u64 group_start,
10445                              struct extent_map *em)
10446 {
10447         struct btrfs_path *path;
10448         struct btrfs_block_group_cache *block_group;
10449         struct btrfs_free_cluster *cluster;
10450         struct btrfs_root *tree_root = root->fs_info->tree_root;
10451         struct btrfs_key key;
10452         struct inode *inode;
10453         struct kobject *kobj = NULL;
10454         int ret;
10455         int index;
10456         int factor;
10457         struct btrfs_caching_control *caching_ctl = NULL;
10458         bool remove_em;
10459
10460         root = root->fs_info->extent_root;
10461
10462         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
10463         BUG_ON(!block_group);
10464         BUG_ON(!block_group->ro);
10465
10466         /*
10467          * Free the reserved super bytes from this block group before
10468          * removing it.
10469          */
10470         free_excluded_extents(root, block_group);
10471
10472         memcpy(&key, &block_group->key, sizeof(key));
10473         index = get_block_group_index(block_group);
10474         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
10475                                   BTRFS_BLOCK_GROUP_RAID1 |
10476                                   BTRFS_BLOCK_GROUP_RAID10))
10477                 factor = 2;
10478         else
10479                 factor = 1;
10480
10481         /* make sure this block group isn't part of an allocation cluster */
10482         cluster = &root->fs_info->data_alloc_cluster;
10483         spin_lock(&cluster->refill_lock);
10484         btrfs_return_cluster_to_free_space(block_group, cluster);
10485         spin_unlock(&cluster->refill_lock);
10486
10487         /*
10488          * make sure this block group isn't part of a metadata
10489          * allocation cluster
10490          */
10491         cluster = &root->fs_info->meta_alloc_cluster;
10492         spin_lock(&cluster->refill_lock);
10493         btrfs_return_cluster_to_free_space(block_group, cluster);
10494         spin_unlock(&cluster->refill_lock);
10495
10496         path = btrfs_alloc_path();
10497         if (!path) {
10498                 ret = -ENOMEM;
10499                 goto out;
10500         }
10501
10502         /*
10503          * get the inode first so any iput calls done for the io_list
10504          * aren't the final iput (no unlinks allowed now)
10505          */
10506         inode = lookup_free_space_inode(tree_root, block_group, path);
10507
10508         mutex_lock(&trans->transaction->cache_write_mutex);
10509         /*
10510          * make sure our free space cache IO is done before removing the
10511          * free space inode
10512          */
10513         spin_lock(&trans->transaction->dirty_bgs_lock);
10514         if (!list_empty(&block_group->io_list)) {
10515                 list_del_init(&block_group->io_list);
10516
10517                 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
10518
10519                 spin_unlock(&trans->transaction->dirty_bgs_lock);
10520                 btrfs_wait_cache_io(root, trans, block_group,
10521                                     &block_group->io_ctl, path,
10522                                     block_group->key.objectid);
10523                 btrfs_put_block_group(block_group);
10524                 spin_lock(&trans->transaction->dirty_bgs_lock);
10525         }
10526
10527         if (!list_empty(&block_group->dirty_list)) {
10528                 list_del_init(&block_group->dirty_list);
10529                 btrfs_put_block_group(block_group);
10530         }
10531         spin_unlock(&trans->transaction->dirty_bgs_lock);
10532         mutex_unlock(&trans->transaction->cache_write_mutex);
10533
10534         if (!IS_ERR(inode)) {
10535                 ret = btrfs_orphan_add(trans, inode);
10536                 if (ret) {
10537                         btrfs_add_delayed_iput(inode);
10538                         goto out;
10539                 }
10540                 clear_nlink(inode);
10541                 /* One for the block group's ref */
10542                 spin_lock(&block_group->lock);
10543                 if (block_group->iref) {
10544                         block_group->iref = 0;
10545                         block_group->inode = NULL;
10546                         spin_unlock(&block_group->lock);
10547                         iput(inode);
10548                 } else {
10549                         spin_unlock(&block_group->lock);
10550                 }
10551                 /* One for our lookup ref */
10552                 btrfs_add_delayed_iput(inode);
10553         }
10554
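              /*
               * Delete this block group's free space cache inode item from
               * the tree of tree roots, if one exists.
               */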
10555         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
10556         key.offset = block_group->key.objectid;
10557         key.type = 0;
10558
10559         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
10560         if (ret < 0)
10561                 goto out;
10562         if (ret > 0)
10563                 btrfs_release_path(path);
10564         if (ret == 0) {
10565                 ret = btrfs_del_item(trans, tree_root, path);
10566                 if (ret)
10567                         goto out;
10568                 btrfs_release_path(path);
10569         }
10570
10571         spin_lock(&root->fs_info->block_group_cache_lock);
10572         rb_erase(&block_group->cache_node,
10573                  &root->fs_info->block_group_cache_tree);
10574         RB_CLEAR_NODE(&block_group->cache_node);
10575
10576         if (root->fs_info->first_logical_byte == block_group->key.objectid)
10577                 root->fs_info->first_logical_byte = (u64)-1;
10578         spin_unlock(&root->fs_info->block_group_cache_lock);
10579
10580         down_write(&block_group->space_info->groups_sem);
10581         /*
10582          * we must use list_del_init so people can check to see if they
10583          * are still on the list after taking the semaphore
10584          */
10585         list_del_init(&block_group->list);
10586         if (list_empty(&block_group->space_info->block_groups[index])) {
10587                 kobj = block_group->space_info->block_group_kobjs[index];
10588                 block_group->space_info->block_group_kobjs[index] = NULL;
10589                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
10590         }
10591         up_write(&block_group->space_info->groups_sem);
10592         if (kobj) {
10593                 kobject_del(kobj);
10594                 kobject_put(kobj);
10595         }
10596
10597         if (block_group->has_caching_ctl)
10598                 caching_ctl = get_caching_control(block_group);
10599         if (block_group->cached == BTRFS_CACHE_STARTED)
10600                 wait_block_group_cache_done(block_group);
10601         if (block_group->has_caching_ctl) {
10602                 down_write(&root->fs_info->commit_root_sem);
10603                 if (!caching_ctl) {
10604                         struct btrfs_caching_control *ctl;
10605
10606                         list_for_each_entry(ctl,
10607                                     &root->fs_info->caching_block_groups, list)
10608                                 if (ctl->block_group == block_group) {
10609                                         caching_ctl = ctl;
10610                                         atomic_inc(&caching_ctl->count);
10611                                         break;
10612                                 }
10613                 }
10614                 if (caching_ctl)
10615                         list_del_init(&caching_ctl->list);
10616                 up_write(&root->fs_info->commit_root_sem);
10617                 if (caching_ctl) {
10618                         /* Once for the caching bgs list and once for us. */
10619                         put_caching_control(caching_ctl);
10620                         put_caching_control(caching_ctl);
10621                 }
10622         }
10623
10624         spin_lock(&trans->transaction->dirty_bgs_lock);
10625         WARN_ON(!list_empty(&block_group->dirty_list));
10626         WARN_ON(!list_empty(&block_group->io_list));
10631         spin_unlock(&trans->transaction->dirty_bgs_lock);
10632         btrfs_remove_free_space_cache(block_group);
10633
10634         spin_lock(&block_group->space_info->lock);
10635         list_del_init(&block_group->ro_list);
10636
10637         if (btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
10638                 WARN_ON(block_group->space_info->total_bytes
10639                         < block_group->key.offset);
10640                 WARN_ON(block_group->space_info->bytes_readonly
10641                         < block_group->key.offset);
10642                 WARN_ON(block_group->space_info->disk_total
10643                         < block_group->key.offset * factor);
10644         }
10645         block_group->space_info->total_bytes -= block_group->key.offset;
10646         block_group->space_info->bytes_readonly -= block_group->key.offset;
10647         block_group->space_info->disk_total -= block_group->key.offset * factor;
10648
10649         spin_unlock(&block_group->space_info->lock);
10650
10651         memcpy(&key, &block_group->key, sizeof(key));
10652
10653         lock_chunks(root);
10654         if (!list_empty(&em->list)) {
10655                 /* We're in the transaction->pending_chunks list. */
10656                 free_extent_map(em);
10657         }
10658         spin_lock(&block_group->lock);
10659         block_group->removed = 1;
10660         /*
10661          * At this point trimming can't start on this block group, because we
10662          * removed the block group from the tree fs_info->block_group_cache_tree
10663          * so no one can find it anymore and even if someone already got this
10664          * block group before we removed it from the rbtree, they have already
10665          * incremented block_group->trimming - if they didn't, they won't find
10666          * any free space entries because we already removed them all when we
10667          * called btrfs_remove_free_space_cache().
10668          *
10669          * And we must not remove the extent map from the fs_info->mapping_tree
10670          * to prevent the same logical address range and physical device space
10671          * ranges from being reused for a new block group. This is because our
10672          * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
10673          * completely transactionless, so while it is trimming a range the
10674          * currently running transaction might finish and a new one start,
10675          * allowing for new block groups to be created that can reuse the same
10676          * physical device locations unless we take this special care.
10677          *
10678          * There may also be an implicit trim operation if the file system
10679          * is mounted with -odiscard. The same protections must remain
10680          * in place until the extents have been discarded completely when
10681          * the transaction commit has completed.
10682          */
10683         remove_em = (atomic_read(&block_group->trimming) == 0);
10684         /*
10685          * Make sure a trimmer task always sees the em in the pinned_chunks list
10686          * if it sees block_group->removed == 1 (needs to lock block_group->lock
10687          * before checking block_group->removed).
10688          */
10689         if (!remove_em) {
10690                 /*
10691                  * Our em might be in trans->transaction->pending_chunks which
10692                  * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
10693                  * and so is the fs_info->pinned_chunks list.
10694                  *
10695                  * So at this point we must be holding the chunk_mutex to avoid
10696                  * any races with chunk allocation (more specifically at
10697                  * volumes.c:contains_pending_extent()), to ensure it always
10698                  * sees the em, either in the pending_chunks list or in the
10699                  * pinned_chunks list.
10700                  */
10701                 list_move_tail(&em->list, &root->fs_info->pinned_chunks);
10702         }
10703         spin_unlock(&block_group->lock);
10704
10705         if (remove_em) {
10706                 struct extent_map_tree *em_tree;
10707
10708                 em_tree = &root->fs_info->mapping_tree.map_tree;
10709                 write_lock(&em_tree->lock);
10710                 /*
10711                  * The em might be in the pending_chunks list, so make sure the
10712                  * chunk mutex is locked, since remove_extent_mapping() will
10713                  * delete us from that list.
10714                  */
10715                 remove_extent_mapping(em_tree, em);
10716                 write_unlock(&em_tree->lock);
10717                 /* once for the tree */
10718                 free_extent_map(em);
10719         }
10720
10721         unlock_chunks(root);
10722
10723         ret = remove_block_group_free_space(trans, root->fs_info, block_group);
10724         if (ret)
10725                 goto out;
10726
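              /* Once for our lookup reference, once for the block group cache rbtree. */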
10727         btrfs_put_block_group(block_group);
10728         btrfs_put_block_group(block_group);
10729
10730         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
10731         if (ret > 0)
10732                 ret = -EIO;
10733         if (ret < 0)
10734                 goto out;
10735
10736         ret = btrfs_del_item(trans, root, path);
10737 out:
10738         btrfs_free_path(path);
10739         return ret;
10740 }
10741
10742 struct btrfs_trans_handle *
10743 btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info,
10744                                      const u64 chunk_offset)
10745 {
10746         struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
10747         struct extent_map *em;
10748         struct map_lookup *map;
10749         unsigned int num_items;
10750
10751         read_lock(&em_tree->lock);
10752         em = lookup_extent_mapping(em_tree, chunk_offset, 1);
10753         read_unlock(&em_tree->lock);
10754         ASSERT(em && em->start == chunk_offset);
10755
10756         /*
10757          * We need to reserve 3 + N units from the metadata space info in order
10758          * to remove a block group (done at btrfs_remove_chunk() and at
10759          * btrfs_remove_block_group()), which are used for:
10760          *
10761          * 1 unit for adding the free space inode's orphan (located in the tree
10762          * of tree roots).
10763          * 1 unit for deleting the block group item (located in the extent
10764          * tree).
10765          * 1 unit for deleting the free space item (located in tree of tree
10766          * roots).
10767          * N units for deleting N device extent items corresponding to each
10768          * stripe (located in the device tree).
10769          *
10770          * In order to remove a block group we also need to reserve units in the
10771          * system space info in order to update the chunk tree (update one or
10772          * more device items and remove one chunk item), but this is done at
10773          * btrfs_remove_chunk() through a call to check_system_chunk().
10774          */
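              /*
               * For example, removing a block group whose chunk has 2 stripes
               * (e.g. RAID1 on two devices) needs 3 + 2 = 5 metadata units.
               */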
10775         map = em->map_lookup;
10776         num_items = 3 + map->num_stripes;
10777         free_extent_map(em);
10778
10779         return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
10780                                                            num_items, 1);
10781 }
10782
10783 /*
10784  * Process the unused_bgs list and remove any that don't have any allocated
10785  * space inside of them.
10786  */
10787 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
10788 {
10789         struct btrfs_block_group_cache *block_group;
10790         struct btrfs_space_info *space_info;
10791         struct btrfs_root *root = fs_info->extent_root;
10792         struct btrfs_trans_handle *trans;
10793         int ret = 0;
10794
10795         if (!fs_info->open)
10796                 return;
10797
10798         spin_lock(&fs_info->unused_bgs_lock);
10799         while (!list_empty(&fs_info->unused_bgs)) {
10800                 u64 start, end;
10801                 int trimming;
10802
10803                 block_group = list_first_entry(&fs_info->unused_bgs,
10804                                                struct btrfs_block_group_cache,
10805                                                bg_list);
10806                 list_del_init(&block_group->bg_list);
10807
10808                 space_info = block_group->space_info;
10809
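                      /*
                       * Bail on every remaining group once an iteration has
                       * failed, and never auto-remove groups belonging to a
                       * mixed (data+metadata) space info.
                       */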
10810                 if (ret || btrfs_mixed_space_info(space_info)) {
10811                         btrfs_put_block_group(block_group);
10812                         continue;
10813                 }
10814                 spin_unlock(&fs_info->unused_bgs_lock);
10815
10816                 mutex_lock(&fs_info->delete_unused_bgs_mutex);
10817
10818                 /* Don't want to race with allocators so take the groups_sem */
10819                 down_write(&space_info->groups_sem);
10820                 spin_lock(&block_group->lock);
10821                 if (block_group->reserved ||
10822                     btrfs_block_group_used(&block_group->item) ||
10823                     block_group->ro ||
10824                     list_is_singular(&block_group->list)) {
10825                         /*
10826                          * We want to bail if we made new allocations or have
10827                          * outstanding allocations in this block group.  We do
10828                          * the ro check in case balance is currently acting on
10829                          * this block group.
10830                          */
10831                         spin_unlock(&block_group->lock);
10832                         up_write(&space_info->groups_sem);
10833                         goto next;
10834                 }
10835                 spin_unlock(&block_group->lock);
10836
10837                 /* We don't want to force the issue, only flip if it's ok. */
10838                 ret = inc_block_group_ro(block_group, 0);
10839                 up_write(&space_info->groups_sem);
10840                 if (ret < 0) {
10841                         ret = 0;
10842                         goto next;
10843                 }
10844
10845                 /*
10846                  * Want to do this before we do anything else so we can recover
10847                  * properly if we fail to join the transaction.
10848                  */
10849                 trans = btrfs_start_trans_remove_block_group(fs_info,
10850                                                      block_group->key.objectid);
10851                 if (IS_ERR(trans)) {
10852                         btrfs_dec_block_group_ro(root, block_group);
10853                         ret = PTR_ERR(trans);
10854                         goto next;
10855                 }
10856
10857                 /*
10858                  * We could have pending pinned extents for this block group,
10859                  * just delete them, we don't care about them anymore.
10860                  */
10861                 start = block_group->key.objectid;
10862                 end = start + block_group->key.offset - 1;
10863                 /*
10864                  * Hold the unused_bg_unpin_mutex lock to avoid racing with
10865                  * btrfs_finish_extent_commit(). If we are at transaction N,
10866                  * another task might be running finish_extent_commit() for the
10867                  * previous transaction N - 1, and have seen a range belonging
10868                  * to the block group in freed_extents[] before we were able to
10869                  * clear the whole block group range from freed_extents[]. This
10870          * means that task can look up the block group after we
10871                  * unpinned it from freed_extents[] and removed it, leading to
10872                  * a BUG_ON() at btrfs_unpin_extent_range().
10873                  */
10874                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
10875                 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
10876                                   EXTENT_DIRTY);
10877                 if (ret) {
10878                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10879                         btrfs_dec_block_group_ro(root, block_group);
10880                         goto end_trans;
10881                 }
10882                 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
10883                                   EXTENT_DIRTY);
10884                 if (ret) {
10885                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10886                         btrfs_dec_block_group_ro(root, block_group);
10887                         goto end_trans;
10888                 }
10889                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10890
10891                 /* Reset pinned so btrfs_put_block_group doesn't complain */
10892                 spin_lock(&space_info->lock);
10893                 spin_lock(&block_group->lock);
10894
10895                 space_info->bytes_pinned -= block_group->pinned;
10896                 space_info->bytes_readonly += block_group->pinned;
10897                 percpu_counter_add(&space_info->total_bytes_pinned,
10898                                    -block_group->pinned);
10899                 block_group->pinned = 0;
10900
10901                 spin_unlock(&block_group->lock);
10902                 spin_unlock(&space_info->lock);
10903
10904                 /* DISCARD can flip during remount */
10905                 trimming = btrfs_test_opt(root->fs_info, DISCARD);
10906
10907                 /* Implicit trim during transaction commit. */
10908                 if (trimming)
10909                         btrfs_get_block_group_trimming(block_group);
10910
10911                 /*
10912                  * btrfs_remove_chunk() will abort the transaction if things go
10913                  * horribly wrong.
10914                  */
10915                 ret = btrfs_remove_chunk(trans, root,
10916                                          block_group->key.objectid);
10917
10918                 if (ret) {
10919                         if (trimming)
10920                                 btrfs_put_block_group_trimming(block_group);
10921                         goto end_trans;
10922                 }
10923
10924                 /*
10925                  * If we're not mounted with -odiscard, we can just forget
10926                  * about this block group. Otherwise we'll need to wait
10927                  * until transaction commit to do the actual discard.
10928                  */
10929                 if (trimming) {
10930                         spin_lock(&fs_info->unused_bgs_lock);
10931                         /*
10932                          * A concurrent scrub might have added us to the list
10933                          * fs_info->unused_bgs, so use a list_move operation
10934                          * to add the block group to the deleted_bgs list.
10935                          */
10936                         list_move(&block_group->bg_list,
10937                                   &trans->transaction->deleted_bgs);
10938                         spin_unlock(&fs_info->unused_bgs_lock);
10939                         btrfs_get_block_group(block_group);
10940                 }
10941 end_trans:
10942                 btrfs_end_transaction(trans, root);
10943 next:
10944                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
10945                 btrfs_put_block_group(block_group);
10946                 spin_lock(&fs_info->unused_bgs_lock);
10947         }
10948         spin_unlock(&fs_info->unused_bgs_lock);
10949 }
10950
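      /*
       * Create the initial in-memory space_info entries: one for the system
       * chunk class, plus either a single mixed data+metadata entry or
       * separate metadata and data entries, depending on whether the
       * MIXED_GROUPS incompat flag is set in the super block.
       */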
10951 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
10952 {
10953         struct btrfs_space_info *space_info;
10954         struct btrfs_super_block *disk_super;
10955         u64 features;
10956         u64 flags;
10957         int mixed = 0;
10958         int ret;
10959
10960         disk_super = fs_info->super_copy;
10961         if (!btrfs_super_root(disk_super))
10962                 return -EINVAL;
10963
10964         features = btrfs_super_incompat_flags(disk_super);
10965         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
10966                 mixed = 1;
10967
10968         flags = BTRFS_BLOCK_GROUP_SYSTEM;
10969         ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
10970         if (ret)
10971                 goto out;
10972
10973         if (mixed) {
10974                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
10975                 ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
10976         } else {
10977                 flags = BTRFS_BLOCK_GROUP_METADATA;
10978                 ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
10979                 if (ret)
10980                         goto out;
10981
10982                 flags = BTRFS_BLOCK_GROUP_DATA;
10983                 ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
10984         }
10985 out:
10986         return ret;
10987 }
10988
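      /*
       * Error-path helper: unpin the given range without returning the space
       * to the free space caches.
       */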
10989 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
10990 {
10991         return unpin_extent_range(root, start, end, false);
10992 }
10993
10994 /*
10995  * It used to be that old block groups would be left around forever.
10996  * Iterating over them would be enough to trim unused space.  Since we
10997  * now automatically remove them, we also need to iterate over unallocated
10998  * space.
10999  *
11000  * We don't want a transaction for this since the discard may take a
11001  * substantial amount of time.  We don't require that a transaction be
11002  * running, but we do need to take a running transaction into account
11003  * to ensure that we're not discarding chunks that were released in
11004  * the current transaction.
11005  *
11006  * Holding the chunks lock will prevent other threads from allocating
11007  * or releasing chunks, but it won't prevent a running transaction
11008  * from committing and releasing the memory that the pending chunks
11009  * list head uses.  For that, we need to take a reference to the
11010  * transaction.
11011  */
11012 static int btrfs_trim_free_extents(struct btrfs_device *device,
11013                                    u64 minlen, u64 *trimmed)
11014 {
11015         u64 start = 0, len = 0;
11016         int ret;
11017
11018         *trimmed = 0;
11019
11020         /* Not writeable = nothing to do. */
11021         if (!device->writeable)
11022                 return 0;
11023
11024         /* No free space = nothing to do. */
11025         if (device->total_bytes <= device->bytes_used)
11026                 return 0;
11027
11028         ret = 0;
11029
11030         while (1) {
11031                 struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
11032                 struct btrfs_transaction *trans;
11033                 u64 bytes;
11034
11035                 ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
11036                 if (ret)
11037                         return ret;
11038
11039                 down_read(&fs_info->commit_root_sem);
11040
11041                 spin_lock(&fs_info->trans_lock);
11042                 trans = fs_info->running_transaction;
11043                 if (trans)
11044                         atomic_inc(&trans->use_count);
11045                 spin_unlock(&fs_info->trans_lock);
11046
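                      /*
                       * Find the next hole of at least minlen bytes in this
                       * device's allocation, taking the running transaction's
                       * pending chunks into account.
                       */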
11047                 ret = find_free_dev_extent_start(trans, device, minlen, start,
11048                                                  &start, &len);
11049                 if (trans)
11050                         btrfs_put_transaction(trans);
11051
11052                 if (ret) {
11053                         up_read(&fs_info->commit_root_sem);
11054                         mutex_unlock(&fs_info->chunk_mutex);
11055                         if (ret == -ENOSPC)
11056                                 ret = 0;
11057                         break;
11058                 }
11059
11060                 ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
11061                 up_read(&fs_info->commit_root_sem);
11062                 mutex_unlock(&fs_info->chunk_mutex);
11063
11064                 if (ret)
11065                         break;
11066
11067                 start += len;
11068                 *trimmed += bytes;
11069
11070                 if (fatal_signal_pending(current)) {
11071                         ret = -ERESTARTSYS;
11072                         break;
11073                 }
11074
11075                 cond_resched();
11076         }
11077
11078         return ret;
11079 }
11080
11081 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
11082 {
11083         struct btrfs_fs_info *fs_info = root->fs_info;
11084         struct btrfs_block_group_cache *cache = NULL;
11085         struct btrfs_device *device;
11086         struct list_head *devices;
11087         u64 group_trimmed;
11088         u64 start;
11089         u64 end;
11090         u64 trimmed = 0;
11091         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
11092         int ret = 0;
11093
11094         /*
11095          * Try to trim all FS space; our block group may start from a non-zero offset.
11096          */
11097         if (range->len == total_bytes)
11098                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
11099         else
11100                 cache = btrfs_lookup_block_group(fs_info, range->start);
11101
11102         while (cache) {
11103                 if (cache->key.objectid >= (range->start + range->len)) {
11104                         btrfs_put_block_group(cache);
11105                         break;
11106                 }
11107
11108                 start = max(range->start, cache->key.objectid);
11109                 end = min(range->start + range->len,
11110                                 cache->key.objectid + cache->key.offset);
11111
11112                 if (end - start >= range->minlen) {
11113                         if (!block_group_cache_done(cache)) {
11114                                 ret = cache_block_group(cache, 0);
11115                                 if (ret) {
11116                                         btrfs_put_block_group(cache);
11117                                         break;
11118                                 }
11119                                 ret = wait_block_group_cache_done(cache);
11120                                 if (ret) {
11121                                         btrfs_put_block_group(cache);
11122                                         break;
11123                                 }
11124                         }
11125                         ret = btrfs_trim_block_group(cache,
11126                                                      &group_trimmed,
11127                                                      start,
11128                                                      end,
11129                                                      range->minlen);
11130
11131                         trimmed += group_trimmed;
11132                         if (ret) {
11133                                 btrfs_put_block_group(cache);
11134                                 break;
11135                         }
11136                 }
11137
11138                 cache = next_block_group(fs_info->tree_root, cache);
11139         }
11140
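              /* Now trim the unallocated space on each writeable device as well. */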
11141         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
11142         devices = &root->fs_info->fs_devices->alloc_list;
11143         list_for_each_entry(device, devices, dev_alloc_list) {
11144                 ret = btrfs_trim_free_extents(device, range->minlen,
11145                                               &group_trimmed);
11146                 if (ret)
11147                         break;
11148
11149                 trimmed += group_trimmed;
11150         }
11151         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
11152
11153         range->len = trimmed;
11154         return ret;
11155 }
11156
11157 /*
11158  * btrfs_{start,end}_write_no_snapshoting() are similar to
11159  * mnt_{want,drop}_write(). They are used to prevent some tasks from writing
11160  * data into the page cache through nocow before the subvolume is snapshoted
11161  * and only flushing it to disk after the snapshot creation, or to prevent
11162  * operations while snapshoting is ongoing that would leave the snapshot
11163  * inconsistent (for example writes followed by expanding truncates).
11164  */
11165 void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
11166 {
11167         percpu_counter_dec(&root->subv_writers->counter);
11168         /*
11169          * Make sure counter is updated before we wake up waiters.
11170          */
11171         smp_mb();
11172         if (waitqueue_active(&root->subv_writers->wait))
11173                 wake_up(&root->subv_writers->wait);
11174 }
11175
11176 int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
11177 {
11178         if (atomic_read(&root->will_be_snapshoted))
11179                 return 0;
11180
11181         percpu_counter_inc(&root->subv_writers->counter);
11182         /*
11183          * Make sure counter is updated before we check for snapshot creation.
11184          */
11185         smp_mb();
11186         if (atomic_read(&root->will_be_snapshoted)) {
11187                 btrfs_end_write_no_snapshoting(root);
11188                 return 0;
11189         }
11190         return 1;
11191 }
11192
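      /*
       * Action callback for wait_on_atomic_t(): sleep until woken, then let
       * the waiter re-check the atomic.
       */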
11193 static int wait_snapshoting_atomic_t(atomic_t *a)
11194 {
11195         schedule();
11196         return 0;
11197 }
11198
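      /*
       * Block until no snapshot creation is pending. Returns with the caller
       * registered as a no-snapshoting writer, so callers are expected to pair
       * this with a later btrfs_end_write_no_snapshoting().
       */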
11199 void btrfs_wait_for_snapshot_creation(struct btrfs_root *root)
11200 {
11201         while (true) {
11202                 int ret;
11203
11204                 ret = btrfs_start_write_no_snapshoting(root);
11205                 if (ret)
11206                         break;
11207                 wait_on_atomic_t(&root->will_be_snapshoted,
11208                                  wait_snapshoting_atomic_t,
11209                                  TASK_UNINTERRUPTIBLE);
11210         }
11211 }