/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
#include <linux/uio.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"

static struct kmem_cache *btrfs_inode_defrag_cachep;
/*
 * When auto defrag is enabled we queue up these defrag structs
 * to remember which inodes need defragging passes.
 */
struct inode_defrag {
        struct rb_node rb_node;
        /* objectid */
        u64 ino;
        /*
         * transid where the defrag was added, we search for
         * extents newer than this
         */
        u64 transid;

        /* root objectid */
        u64 root;

        /* last offset we were able to defrag */
        u64 last_offset;

        /* if we've wrapped around back to zero once already */
        int cycled;
};

static int __compare_inode_defrag(struct inode_defrag *defrag1,
                                  struct inode_defrag *defrag2)
{
        if (defrag1->root > defrag2->root)
                return 1;
        else if (defrag1->root < defrag2->root)
                return -1;
        else if (defrag1->ino > defrag2->ino)
                return 1;
        else if (defrag1->ino < defrag2->ino)
                return -1;
        else
                return 0;
}
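
/*
 * Note: this yields a total order on (root, ino) pairs, comparing root
 * first, so all defrag records for one subvolume sit together in the
 * rbtree. For example, (root 5, ino 1000) sorts before (root 257, ino 1).
 */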

/* insert a record for an inode into the defrag tree.  The
 * fs_info->defrag_inodes_lock must already be held
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered
 *
 * If an existing record is found, -EEXIST is returned and the
 * caller is expected to free the defrag item it passed in
 */
static int __btrfs_add_inode_defrag(struct inode *inode,
                                    struct inode_defrag *defrag)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct inode_defrag *entry;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        int ret;

        p = &root->fs_info->defrag_inodes.rb_node;
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);

                ret = __compare_inode_defrag(defrag, entry);
                if (ret < 0)
                        p = &parent->rb_left;
                else if (ret > 0)
                        p = &parent->rb_right;
                else {
                        /* if we're reinserting an entry for
                         * an old defrag run, make sure to
                         * lower the transid of our existing record
                         */
                        if (defrag->transid < entry->transid)
                                entry->transid = defrag->transid;
                        if (defrag->last_offset > entry->last_offset)
                                entry->last_offset = defrag->last_offset;
                        return -EEXIST;
                }
        }
        set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
        rb_link_node(&defrag->rb_node, parent, p);
        rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
        return 0;
}

static inline int __need_auto_defrag(struct btrfs_root *root)
{
        if (!btrfs_test_opt(root, AUTO_DEFRAG))
                return 0;

        if (btrfs_fs_closing(root->fs_info))
                return 0;

        return 1;
}

/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
                           struct inode *inode)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct inode_defrag *defrag;
        u64 transid;
        int ret;

        if (!__need_auto_defrag(root))
                return 0;

        if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
                return 0;

        if (trans)
                transid = trans->transid;
        else
                transid = BTRFS_I(inode)->root->last_trans;

        defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
        if (!defrag)
                return -ENOMEM;

        defrag->ino = btrfs_ino(inode);
        defrag->transid = transid;
        defrag->root = root->root_key.objectid;

        spin_lock(&root->fs_info->defrag_inodes_lock);
        if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) {
                /*
                 * If we set the IN_DEFRAG flag and the inode is evicted
                 * from memory and then read back in, the new in-memory
                 * inode won't have IN_DEFRAG set. In that case we may
                 * find an existing defrag record in the tree.
                 */
                ret = __btrfs_add_inode_defrag(inode, defrag);
                if (ret)
                        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        } else {
                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        }
        spin_unlock(&root->fs_info->defrag_inodes_lock);
        return 0;
}
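
/*
 * Note: aside from -ENOMEM on allocation failure, this returns 0 even
 * when __btrfs_add_inode_defrag() found an existing record and the new
 * one was merged and freed; callers never see -EEXIST.
 */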

/*
 * Requeue the defrag object. If there is a defrag object that points to
 * the same inode in the tree, we will merge them together (by
 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 */
static void btrfs_requeue_inode_defrag(struct inode *inode,
                                       struct inode_defrag *defrag)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret;

        if (!__need_auto_defrag(root))
                goto out;

        /*
         * Here we don't check the IN_DEFRAG flag, because we need to
         * merge them together.
         */
        spin_lock(&root->fs_info->defrag_inodes_lock);
        ret = __btrfs_add_inode_defrag(inode, defrag);
        spin_unlock(&root->fs_info->defrag_inodes_lock);
        if (ret)
                goto out;
        return;
out:
        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}

/*
 * Pick the defrag record for the inode we want; if it doesn't exist,
 * we will get the next one.
 */
static struct inode_defrag *
btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
        struct inode_defrag *entry = NULL;
        struct inode_defrag tmp;
        struct rb_node *p;
        struct rb_node *parent = NULL;
        int ret;

        tmp.ino = ino;
        tmp.root = root;

        spin_lock(&fs_info->defrag_inodes_lock);
        p = fs_info->defrag_inodes.rb_node;
        while (p) {
                parent = p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);

                ret = __compare_inode_defrag(&tmp, entry);
                if (ret < 0)
                        p = parent->rb_left;
                else if (ret > 0)
                        p = parent->rb_right;
                else
                        goto out;
        }

        if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
                parent = rb_next(parent);
                if (parent)
                        entry = rb_entry(parent, struct inode_defrag, rb_node);
                else
                        entry = NULL;
        }
out:
        if (entry)
                rb_erase(parent, &fs_info->defrag_inodes);
        spin_unlock(&fs_info->defrag_inodes_lock);
        return entry;
}
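
/*
 * Note: the record returned by btrfs_pick_defrag_inode() has been erased
 * from the rbtree, so ownership passes to the caller, which must either
 * requeue it or free it back to btrfs_inode_defrag_cachep.
 */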

void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
        struct inode_defrag *defrag;
        struct rb_node *node;

        spin_lock(&fs_info->defrag_inodes_lock);
        node = rb_first(&fs_info->defrag_inodes);
        while (node) {
                rb_erase(node, &fs_info->defrag_inodes);
                defrag = rb_entry(node, struct inode_defrag, rb_node);
                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

                cond_resched_lock(&fs_info->defrag_inodes_lock);

                node = rb_first(&fs_info->defrag_inodes);
        }
        spin_unlock(&fs_info->defrag_inodes_lock);
}

#define BTRFS_DEFRAG_BATCH      1024
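
/*
 * BTRFS_DEFRAG_BATCH bounds how much of one inode gets defragged per
 * pass: it is handed to btrfs_defrag_file() as the batch size below, and
 * a pass that uses the whole batch causes the inode to be requeued so
 * other inodes get a turn.
 */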

static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
                                    struct inode_defrag *defrag)
{
        struct btrfs_root *inode_root;
        struct inode *inode;
        struct btrfs_key key;
        struct btrfs_ioctl_defrag_range_args range;
        int num_defrag;
        int index;
        int ret;

        /* get the inode */
        key.objectid = defrag->root;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = (u64)-1;

        index = srcu_read_lock(&fs_info->subvol_srcu);

        inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
        if (IS_ERR(inode_root)) {
                ret = PTR_ERR(inode_root);
                goto cleanup;
        }

        key.objectid = defrag->ino;
        key.type = BTRFS_INODE_ITEM_KEY;
        key.offset = 0;
        inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
        if (IS_ERR(inode)) {
                ret = PTR_ERR(inode);
                goto cleanup;
        }
        srcu_read_unlock(&fs_info->subvol_srcu, index);

        /* do a chunk of defrag */
        clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
        memset(&range, 0, sizeof(range));
        range.len = (u64)-1;
        range.start = defrag->last_offset;

        sb_start_write(fs_info->sb);
        num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
                                       BTRFS_DEFRAG_BATCH);
        sb_end_write(fs_info->sb);
        /*
         * if we filled the whole defrag batch, there
         * must be more work to do.  Queue this defrag
         * again
         */
        if (num_defrag == BTRFS_DEFRAG_BATCH) {
                defrag->last_offset = range.start;
                btrfs_requeue_inode_defrag(inode, defrag);
        } else if (defrag->last_offset && !defrag->cycled) {
                /*
                 * we didn't fill our defrag batch, but
                 * we didn't start at zero.  Make sure we loop
                 * around to the start of the file.
                 */
                defrag->last_offset = 0;
                defrag->cycled = 1;
                btrfs_requeue_inode_defrag(inode, defrag);
        } else {
                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        }

        iput(inode);
        return 0;
cleanup:
        srcu_read_unlock(&fs_info->subvol_srcu, index);
        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        return ret;
}

/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
        struct inode_defrag *defrag;
        u64 first_ino = 0;
        u64 root_objectid = 0;

        atomic_inc(&fs_info->defrag_running);
        while (1) {
                /* Pause the auto defragger. */
                if (test_bit(BTRFS_FS_STATE_REMOUNTING,
                             &fs_info->fs_state))
                        break;

                if (!__need_auto_defrag(fs_info->tree_root))
                        break;

                /* find an inode to defrag */
                defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
                                                 first_ino);
                if (!defrag) {
                        if (root_objectid || first_ino) {
                                root_objectid = 0;
                                first_ino = 0;
                                continue;
                        } else {
                                break;
                        }
                }

                first_ino = defrag->ino + 1;
                root_objectid = defrag->root;

                __btrfs_run_defrag_inode(fs_info, defrag);
        }
        atomic_dec(&fs_info->defrag_running);

        /*
         * during unmount, we use the transaction_wait queue to
         * wait for the defragger to stop
         */
        wake_up(&fs_info->transaction_wait);
        return 0;
}
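
/*
 * Note on the scan order above: advancing first_ino to defrag->ino + 1
 * makes the next btrfs_pick_defrag_inode() call return the record that
 * follows the one just processed, and resetting (root_objectid, first_ino)
 * to (0, 0) when nothing is found rescans the tree once from the start
 * before giving up.
 */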

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
                                         size_t write_bytes,
                                         struct page **prepared_pages,
                                         struct iov_iter *i)
{
        size_t copied = 0;
        size_t total_copied = 0;
        int pg = 0;
        int offset = pos & (PAGE_CACHE_SIZE - 1);

        while (write_bytes > 0) {
                size_t count = min_t(size_t,
                                     PAGE_CACHE_SIZE - offset, write_bytes);
                struct page *page = prepared_pages[pg];
                /*
                 * Copy data from userspace to the current page
                 */
                copied = iov_iter_copy_from_user_atomic(page, i, offset, count);

                /* Flush processor's dcache for this page */
                flush_dcache_page(page);

                /*
                 * if we get a partial write, we can end up with
                 * partially up to date pages.  These add
                 * a lot of complexity, so make sure they don't
                 * happen by forcing this copy to be retried.
                 *
                 * The rest of the btrfs_file_write code will fall
                 * back to page at a time copies after we return 0.
                 */
                if (!PageUptodate(page) && copied < count)
                        copied = 0;

                iov_iter_advance(i, copied);
                write_bytes -= copied;
                total_copied += copied;

                /* Return to btrfs_file_write_iter to fault page */
                if (unlikely(copied == 0))
                        break;

                if (copied < PAGE_CACHE_SIZE - offset) {
                        offset += copied;
                } else {
                        pg++;
                        offset = 0;
                }
        }
        return total_copied;
}
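
/*
 * Note: the atomic copy above may legitimately transfer fewer bytes than
 * asked for (the source user pages may not be faulted in), so returning
 * a short count, including 0, is part of the contract; the write loop is
 * expected to fault the user pages in and retry.
 */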

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
        size_t i;
        for (i = 0; i < num_pages; i++) {
                /* page checked is some magic around finding pages that
                 * have been modified without going through
                 * btrfs_set_page_dirty; clear it here. There should be
                 * no need to mark the pages accessed, as prepare_pages
                 * should have marked them accessed via
                 * find_or_create_page()
                 */
                ClearPageChecked(pages[i]);
                unlock_page(pages[i]);
                page_cache_release(pages[i]);
        }
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
                             struct page **pages, size_t num_pages,
                             loff_t pos, size_t write_bytes,
                             struct extent_state **cached)
{
        int err = 0;
        int i;
        u64 num_bytes;
        u64 start_pos;
        u64 end_of_last_block;
        u64 end_pos = pos + write_bytes;
        loff_t isize = i_size_read(inode);

        start_pos = pos & ~((u64)root->sectorsize - 1);
        num_bytes = ALIGN(write_bytes + pos - start_pos, root->sectorsize);

        end_of_last_block = start_pos + num_bytes - 1;
        err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
                                        cached);
        if (err)
                return err;

        for (i = 0; i < num_pages; i++) {
                struct page *p = pages[i];
                SetPageUptodate(p);
                ClearPageChecked(p);
                set_page_dirty(p);
        }

        /*
         * we've only changed i_size in ram, and we haven't updated
         * the disk i_size.  There is no need to log the inode
         * at this time.
         */
        if (end_pos > isize)
                i_size_write(inode, end_pos);
        return 0;
}
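
/*
 * Note: the delalloc range above is widened to sector boundaries first
 * (start_pos rounds pos down to a sector, num_bytes rounds the length
 * up), so the delalloc bits always cover whole sectors even for writes
 * that start or end mid-sector.
 */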

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                             int skip_pinned)
{
        struct extent_map *em;
        struct extent_map *split = NULL;
        struct extent_map *split2 = NULL;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        u64 len = end - start + 1;
        u64 gen;
        int ret;
        int testend = 1;
        unsigned long flags;
        int compressed = 0;
        bool modified;

        WARN_ON(end < start);
        if (end == (u64)-1) {
                len = (u64)-1;
                testend = 0;
        }
        while (1) {
                int no_splits = 0;

                modified = false;
                if (!split)
                        split = alloc_extent_map();
                if (!split2)
                        split2 = alloc_extent_map();
                if (!split || !split2)
                        no_splits = 1;

                write_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, start, len);
                if (!em) {
                        write_unlock(&em_tree->lock);
                        break;
                }
                flags = em->flags;
                gen = em->generation;
                if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
                        if (testend && em->start + em->len >= start + len) {
                                free_extent_map(em);
                                write_unlock(&em_tree->lock);
                                break;
                        }
                        start = em->start + em->len;
                        if (testend)
                                len = start + len - (em->start + em->len);
                        free_extent_map(em);
                        write_unlock(&em_tree->lock);
                        continue;
                }
                compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                clear_bit(EXTENT_FLAG_PINNED, &em->flags);
                clear_bit(EXTENT_FLAG_LOGGING, &flags);
                modified = !list_empty(&em->list);
                if (no_splits)
                        goto next;

                if (em->start < start) {
                        split->start = em->start;
                        split->len = start - em->start;

                        if (em->block_start < EXTENT_MAP_LAST_BYTE) {
                                split->orig_start = em->orig_start;
                                split->block_start = em->block_start;

                                if (compressed)
                                        split->block_len = em->block_len;
                                else
                                        split->block_len = split->len;
                                split->orig_block_len = max(split->block_len,
                                                em->orig_block_len);
                                split->ram_bytes = em->ram_bytes;
                        } else {
                                split->orig_start = split->start;
                                split->block_len = 0;
                                split->block_start = em->block_start;
                                split->orig_block_len = 0;
                                split->ram_bytes = split->len;
                        }

                        split->generation = gen;
                        split->bdev = em->bdev;
                        split->flags = flags;
                        split->compress_type = em->compress_type;
                        replace_extent_mapping(em_tree, em, split, modified);
                        free_extent_map(split);
                        split = split2;
                        split2 = NULL;
                }
                if (testend && em->start + em->len > start + len) {
                        u64 diff = start + len - em->start;

                        split->start = start + len;
                        split->len = em->start + em->len - (start + len);
                        split->bdev = em->bdev;
                        split->flags = flags;
                        split->compress_type = em->compress_type;
                        split->generation = gen;

                        if (em->block_start < EXTENT_MAP_LAST_BYTE) {
                                split->orig_block_len = max(em->block_len,
                                                    em->orig_block_len);

                                split->ram_bytes = em->ram_bytes;
                                if (compressed) {
                                        split->block_len = em->block_len;
                                        split->block_start = em->block_start;
                                        split->orig_start = em->orig_start;
                                } else {
                                        split->block_len = split->len;
                                        split->block_start = em->block_start
                                                + diff;
                                        split->orig_start = em->orig_start;
                                }
                        } else {
                                split->ram_bytes = split->len;
                                split->orig_start = split->start;
                                split->block_len = 0;
                                split->block_start = em->block_start;
                                split->orig_block_len = 0;
                        }

                        if (extent_map_in_tree(em)) {
                                replace_extent_mapping(em_tree, em, split,
                                                       modified);
                        } else {
                                ret = add_extent_mapping(em_tree, split,
                                                         modified);
                                ASSERT(ret == 0); /* Logic error */
                        }
                        free_extent_map(split);
                        split = NULL;
                }
next:
                if (extent_map_in_tree(em))
                        remove_extent_mapping(em_tree, em);
                write_unlock(&em_tree->lock);

                /* once for us */
                free_extent_map(em);
                /* once for the tree */
                free_extent_map(em);
        }
        if (split)
                free_extent_map(split);
        if (split2)
                free_extent_map(split2);
}
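
/*
 * Note on the loop above: an extent map overhanging the dropped range can
 * leave up to two surviving pieces, one in front of 'start' and one past
 * 'end', which is why two split maps are preallocated per iteration. The
 * two free_extent_map(em) calls drop the lookup reference and the tree's
 * reference respectively.
 */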

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root, struct inode *inode,
                         struct btrfs_path *path, u64 start, u64 end,
                         u64 *drop_end, int drop_cache,
                         int replace_extent,
                         u32 extent_item_size,
                         int *key_inserted)
{
        struct extent_buffer *leaf;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 ino = btrfs_ino(inode);
        u64 search_start = start;
        u64 disk_bytenr = 0;
        u64 num_bytes = 0;
        u64 extent_offset = 0;
        u64 extent_end = 0;
        int del_nr = 0;
        int del_slot = 0;
        int extent_type;
        int recow;
        int ret;
        int modify_tree = -1;
        int update_refs;
        int found = 0;
        int leafs_visited = 0;

        if (drop_cache)
                btrfs_drop_extent_cache(inode, start, end - 1, 0);

        if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent)
                modify_tree = 0;

        update_refs = (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
                       root == root->fs_info->tree_root);
        while (1) {
                recow = 0;
                ret = btrfs_lookup_file_extent(trans, root, path, ino,
                                               search_start, modify_tree);
                if (ret < 0)
                        break;
                if (ret > 0 && path->slots[0] > 0 && search_start == start) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
                        if (key.objectid == ino &&
                            key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
                ret = 0;
                leafs_visited++;
next_slot:
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        BUG_ON(del_nr > 0);
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                break;
                        if (ret > 0) {
                                ret = 0;
                                break;
                        }
                        leafs_visited++;
                        leaf = path->nodes[0];
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

                if (key.objectid > ino)
                        break;
                if (WARN_ON_ONCE(key.objectid < ino) ||
                    key.type < BTRFS_EXTENT_DATA_KEY) {
                        ASSERT(del_nr == 0);
                        path->slots[0]++;
                        goto next_slot;
                }
                if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
                        break;

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                extent_type = btrfs_file_extent_type(leaf, fi);

                if (extent_type == BTRFS_FILE_EXTENT_REG ||
                    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
                        extent_offset = btrfs_file_extent_offset(leaf, fi);
                        extent_end = key.offset +
                                btrfs_file_extent_num_bytes(leaf, fi);
                } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                        extent_end = key.offset +
                                btrfs_file_extent_inline_len(leaf,
                                                     path->slots[0], fi);
                } else {
                        /* can't happen */
                        BUG();
                }

                /*
                 * Don't skip extent items representing 0 byte lengths. They
                 * used to be created (a bug) when we hit an -ENOSPC condition
                 * while punching holes. So if we find one here, just ensure
                 * we delete it, otherwise we would insert a new file extent
                 * item with the same key (offset) as that 0 bytes length file
                 * extent item in the call to setup_items_for_insert() later
                 * in this function.
                 */
                if (extent_end == key.offset && extent_end >= search_start)
                        goto delete_extent_item;

                if (extent_end <= search_start) {
                        path->slots[0]++;
                        goto next_slot;
                }

                found = 1;
                search_start = max(key.offset, start);
                if (recow || !modify_tree) {
                        modify_tree = -1;
                        btrfs_release_path(path);
                        continue;
                }

                /*
                 *     | - range to drop - |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end < extent_end) {
                        BUG_ON(del_nr > 0);
                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                ret = -EOPNOTSUPP;
                                break;
                        }

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = start;
                        ret = btrfs_duplicate_item(trans, root, path,
                                                   &new_key);
                        if (ret == -EAGAIN) {
                                btrfs_release_path(path);
                                continue;
                        }
                        if (ret < 0)
                                break;

                        leaf = path->nodes[0];
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);

                        extent_offset += start - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - start);
                        btrfs_mark_buffer_dirty(leaf);

                        if (update_refs && disk_bytenr > 0) {
                                ret = btrfs_inc_extent_ref(trans, root,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                new_key.objectid,
                                                start - extent_offset);
                                BUG_ON(ret); /* -ENOMEM */
                        }
                        key.offset = start;
                }
                /*
                 *  | ---- range to drop ----- |
                 *      | -------- extent -------- |
                 */
                if (start <= key.offset && end < extent_end) {
                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                ret = -EOPNOTSUPP;
                                break;
                        }

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = end;
                        btrfs_set_item_key_safe(root->fs_info, path, &new_key);

                        extent_offset += end - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_mark_buffer_dirty(leaf);
                        if (update_refs && disk_bytenr > 0)
                                inode_sub_bytes(inode, end - key.offset);
                        break;
                }

                search_start = extent_end;
                /*
                 *       | ---- range to drop ----- |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end >= extent_end) {
                        BUG_ON(del_nr > 0);
                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                ret = -EOPNOTSUPP;
                                break;
                        }

                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        btrfs_mark_buffer_dirty(leaf);
                        if (update_refs && disk_bytenr > 0)
                                inode_sub_bytes(inode, extent_end - start);
                        if (end == extent_end)
                                break;

                        path->slots[0]++;
                        goto next_slot;
                }

                /*
                 *  | ---- range to drop ----- |
                 *    | ------ extent ------ |
                 */
                if (start <= key.offset && end >= extent_end) {
delete_extent_item:
                        if (del_nr == 0) {
                                del_slot = path->slots[0];
                                del_nr = 1;
                        } else {
                                BUG_ON(del_slot + del_nr != path->slots[0]);
                                del_nr++;
                        }

                        if (update_refs &&
                            extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
                                extent_end = ALIGN(extent_end,
                                                   root->sectorsize);
                        } else if (update_refs && disk_bytenr > 0) {
                                ret = btrfs_free_extent(trans, root,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                key.objectid, key.offset -
                                                extent_offset);
                                BUG_ON(ret); /* -ENOMEM */
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
                        }

                        if (end == extent_end)
                                break;

                        if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
                                path->slots[0]++;
                                goto next_slot;
                        }

                        ret = btrfs_del_items(trans, root, path, del_slot,
                                              del_nr);
                        if (ret) {
                                btrfs_abort_transaction(trans, root, ret);
                                break;
                        }

                        del_nr = 0;
                        del_slot = 0;

                        btrfs_release_path(path);
                        continue;
                }

                BUG_ON(1);
        }

        if (!ret && del_nr > 0) {
                /*
                 * Set path->slots[0] to the first slot, so that after the
                 * delete, if items are moved off from our leaf to its
                 * immediate left or right neighbor leaves, we end up with a
                 * correct and adjusted path->slots[0] for our insertion (if
                 * replace_extent != 0).
                 */
                path->slots[0] = del_slot;
                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                if (ret)
                        btrfs_abort_transaction(trans, root, ret);
        }

        leaf = path->nodes[0];
        /*
         * If btrfs_del_items() was called, it might have deleted a leaf, in
         * which case it unlocked our path, so check path->locks[0] matches a
         * write lock.
         */
        if (!ret && replace_extent && leafs_visited == 1 &&
            (path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
             path->locks[0] == BTRFS_WRITE_LOCK) &&
            btrfs_leaf_free_space(root, leaf) >=
            sizeof(struct btrfs_item) + extent_item_size) {

                key.objectid = ino;
                key.type = BTRFS_EXTENT_DATA_KEY;
                key.offset = start;
                if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
                        struct btrfs_key slot_key;

                        btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
                        if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
                                path->slots[0]++;
                }
                setup_items_for_insert(root, path, &key,
                                       &extent_item_size,
                                       extent_item_size,
                                       sizeof(struct btrfs_item) +
                                       extent_item_size, 1);
                *key_inserted = 1;
        }

        if (!replace_extent || !(*key_inserted))
                btrfs_release_path(path);
        if (drop_end)
                *drop_end = found ? min(end, extent_end) : end;
        return ret;
}
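
/*
 * Note: when drop_end is non-NULL it reports how far the drop actually
 * got: min(end, extent_end) if an intersecting extent was found, else
 * the requested end, so a caller can continue processing from there.
 */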

int btrfs_drop_extents(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct inode *inode, u64 start,
                       u64 end, int drop_cache)
{
        struct btrfs_path *path;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
                                   drop_cache, 0, 0, NULL);
        btrfs_free_path(path);
        return ret;
}

static int extent_mergeable(struct extent_buffer *leaf, int slot,
                            u64 objectid, u64 bytenr, u64 orig_offset,
                            u64 *start, u64 *end)
{
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        u64 extent_end;

        if (slot < 0 || slot >= btrfs_header_nritems(leaf))
                return 0;

        btrfs_item_key_to_cpu(leaf, &key, slot);
        if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
                return 0;

        fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
            btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
            btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
            btrfs_file_extent_compression(leaf, fi) ||
            btrfs_file_extent_encryption(leaf, fi) ||
            btrfs_file_extent_other_encoding(leaf, fi))
                return 0;

        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        if ((*start && *start != key.offset) || (*end && *end != extent_end))
                return 0;

        *start = key.offset;
        *end = extent_end;
        return 1;
}
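
/*
 * Note: *start and *end are in/out parameters. A zero on input means
 * "don't care"; a nonzero input must match the candidate extent's
 * boundary exactly. On success both are set to the candidate's range.
 */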

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of the extent is marked as written, the extent will be split
 * into two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
                              struct inode *inode, u64 start, u64 end)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_buffer *leaf;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 bytenr;
        u64 num_bytes;
        u64 extent_end;
        u64 orig_offset;
        u64 other_start;
        u64 other_end;
        u64 split;
        int del_nr = 0;
        int del_slot = 0;
        int recow;
        int ret;
        u64 ino = btrfs_ino(inode);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
again:
        recow = 0;
        split = start;
        key.objectid = ino;
        key.type = BTRFS_EXTENT_DATA_KEY;
        key.offset = split;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
                goto out;
        if (ret > 0 && path->slots[0] > 0)
                path->slots[0]--;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
        fi = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        BUG_ON(btrfs_file_extent_type(leaf, fi) !=
               BTRFS_FILE_EXTENT_PREALLOC);
        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        BUG_ON(key.offset > start || extent_end < end);

        bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
        orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
        memcpy(&new_key, &key, sizeof(new_key));

        if (start == key.offset && end < extent_end) {
                other_start = 0;
                other_end = start;
                if (extent_mergeable(leaf, path->slots[0] - 1,
                                     ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        new_key.offset = end;
                        btrfs_set_item_key_safe(root->fs_info, path, &new_key);
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     end - orig_offset);
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        end - other_start);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

        if (start > key.offset && end == extent_end) {
                other_start = end;
                other_end = 0;
                if (extent_mergeable(leaf, path->slots[0] + 1,
                                     ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        path->slots[0]++;
                        new_key.offset = start;
                        btrfs_set_item_key_safe(root->fs_info, path, &new_key);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        other_end - start);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     start - orig_offset);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

        while (start > key.offset || end < extent_end) {
                if (key.offset == start)
                        split = end;

                new_key.offset = split;
                ret = btrfs_duplicate_item(trans, root, path, &new_key);
                if (ret == -EAGAIN) {
                        btrfs_release_path(path);
                        goto again;
                }
                if (ret < 0) {
                        btrfs_abort_transaction(trans, root, ret);
                        goto out;
                }

                leaf = path->nodes[0];
                fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                    struct btrfs_file_extent_item);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                split - key.offset);

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);

                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - split);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
                                           root->root_key.objectid,
                                           ino, orig_offset);
                BUG_ON(ret); /* -ENOMEM */

                if (split == start) {
                        key.offset = start;
                } else {
                        BUG_ON(start != key.offset);
                        path->slots[0]--;
                        extent_end = end;
                }
                recow = 1;
        }

        other_start = end;
        other_end = 0;
        if (extent_mergeable(leaf, path->slots[0] + 1,
                             ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(path);
                        goto again;
                }
                extent_end = other_end;
                del_slot = path->slots[0] + 1;
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
                                        ino, orig_offset);
                BUG_ON(ret); /* -ENOMEM */
        }
        other_start = 0;
        other_end = start;
        if (extent_mergeable(leaf, path->slots[0] - 1,
                             ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(path);
                        goto again;
                }
                key.offset = other_start;
                del_slot = path->slots[0];
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
                                        ino, orig_offset);
                BUG_ON(ret); /* -ENOMEM */
        }
        if (del_nr == 0) {
                fi = btrfs_item_ptr(leaf, path->slots[0],
                           struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_mark_buffer_dirty(leaf);
        } else {
                fi = btrfs_item_ptr(leaf, del_slot - 1,
                           struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - key.offset);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                if (ret < 0) {
                        btrfs_abort_transaction(trans, root, ret);
                        goto out;
                }
        }
out:
        btrfs_free_path(path);
        return 0;
}

/*
 * on error we return an unlocked page and the error value;
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct page *page, u64 pos,
                                 bool force_uptodate)
{
        int ret = 0;

        if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
            !PageUptodate(page)) {
                ret = btrfs_readpage(NULL, page);
                if (ret)
                        return ret;
                lock_page(page);
                if (!PageUptodate(page)) {
                        unlock_page(page);
                        return -EIO;
                }
        }
        return 0;
}
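
/*
 * Note: the read is skipped when the position handed in is page aligned
 * and force_uptodate is not set; the caller passes the start offset for
 * the first page and the end offset for the last one, so only partially
 * covered edge pages are read in.
 */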
1312
1313 /*
1314  * this just gets pages into the page cache and locks them down.
1315  */
1316 static noinline int prepare_pages(struct inode *inode, struct page **pages,
1317                                   size_t num_pages, loff_t pos,
1318                                   size_t write_bytes, bool force_uptodate)
1319 {
1320         int i;
1321         unsigned long index = pos >> PAGE_CACHE_SHIFT;
1322         gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
1323         int err = 0;
1324         int faili;
1325
1326         for (i = 0; i < num_pages; i++) {
1327                 pages[i] = find_or_create_page(inode->i_mapping, index + i,
1328                                                mask | __GFP_WRITE);
1329                 if (!pages[i]) {
1330                         faili = i - 1;
1331                         err = -ENOMEM;
1332                         goto fail;
1333                 }
1334
1335                 if (i == 0)
1336                         err = prepare_uptodate_page(pages[i], pos,
1337                                                     force_uptodate);
1338                 if (!err && i == num_pages - 1)
1339                         err = prepare_uptodate_page(pages[i],
1340                                                     pos + write_bytes, false);
1341                 if (err) {
1342                         page_cache_release(pages[i]);
1343                         faili = i - 1;
1344                         goto fail;
1345                 }
1346                 wait_on_page_writeback(pages[i]);
1347         }
1348
1349         return 0;
1350 fail:
1351         while (faili >= 0) {
1352                 unlock_page(pages[faili]);
1353                 page_cache_release(pages[faili]);
1354                 faili--;
1355         }
1356         return err;
1358 }
1359
1360 /*
1361  * This function locks the extent and properly waits for data=ordered extents
1362  * to finish before allowing the pages to be modified if needed.
1363  *
1364  * The return value:
1365  * 1 - the extent is locked
1366  * 0 - the extent is not locked, and everything is OK
1367  * -EAGAIN - the pages need to be re-prepared
1368  * any other negative value - something went wrong
1369  */
1370 static noinline int
1371 lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
1372                                 size_t num_pages, loff_t pos,
1373                                 u64 *lockstart, u64 *lockend,
1374                                 struct extent_state **cached_state)
1375 {
1376         u64 start_pos;
1377         u64 last_pos;
1378         int i;
1379         int ret = 0;
1380
1381         start_pos = pos & ~((u64)PAGE_CACHE_SIZE - 1);
1382         last_pos = start_pos + ((u64)num_pages << PAGE_CACHE_SHIFT) - 1;
1383
1384         if (start_pos < inode->i_size) {
1385                 struct btrfs_ordered_extent *ordered;
1386                 lock_extent_bits(&BTRFS_I(inode)->io_tree,
1387                                  start_pos, last_pos, 0, cached_state);
1388                 ordered = btrfs_lookup_ordered_range(inode, start_pos,
1389                                                      last_pos - start_pos + 1);
1390                 if (ordered &&
1391                     ordered->file_offset + ordered->len > start_pos &&
1392                     ordered->file_offset <= last_pos) {
1393                         unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1394                                              start_pos, last_pos,
1395                                              cached_state, GFP_NOFS);
1396                         for (i = 0; i < num_pages; i++) {
1397                                 unlock_page(pages[i]);
1398                                 page_cache_release(pages[i]);
1399                         }
1400                         btrfs_start_ordered_extent(inode, ordered, 1);
1401                         btrfs_put_ordered_extent(ordered);
1402                         return -EAGAIN;
1403                 }
1404                 if (ordered)
1405                         btrfs_put_ordered_extent(ordered);
1406
1407                 clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
1408                                   last_pos, EXTENT_DIRTY | EXTENT_DELALLOC |
1409                                   EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
1410                                   0, 0, cached_state, GFP_NOFS);
1411                 *lockstart = start_pos;
1412                 *lockend = last_pos;
1413                 ret = 1;
1414         }
1415
1416         for (i = 0; i < num_pages; i++) {
1417                 if (clear_page_dirty_for_io(pages[i]))
1418                         account_page_redirty(pages[i]);
1419                 set_page_extent_mapped(pages[i]);
1420                 WARN_ON(!PageLocked(pages[i]));
1421         }
1422
1423         return ret;
1424 }
1425
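     /*
      * Check if a NOCOW write can be done for the range starting at @pos.
      *
      * Returns -ENOSPC if a snapshot is being created, 0 if the write must
      * COW, and > 0 if the write can go ahead without COW (shrinking
      * *write_bytes to the part covered by the existing extent when needed).
      * When > 0 is returned, the caller is responsible for calling
      * btrfs_end_write_no_snapshoting() once the write is done; in the
      * other cases the counter taken here has already been dropped.
      */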
1426 static noinline int check_can_nocow(struct inode *inode, loff_t pos,
1427                                     size_t *write_bytes)
1428 {
1429         struct btrfs_root *root = BTRFS_I(inode)->root;
1430         struct btrfs_ordered_extent *ordered;
1431         u64 lockstart, lockend;
1432         u64 num_bytes;
1433         int ret;
1434
1435         ret = btrfs_start_write_no_snapshoting(root);
1436         if (!ret)
1437                 return -ENOSPC;
1438
1439         lockstart = round_down(pos, root->sectorsize);
1440         lockend = round_up(pos + *write_bytes, root->sectorsize) - 1;
1441
1442         while (1) {
1443                 lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
1444                 ordered = btrfs_lookup_ordered_range(inode, lockstart,
1445                                                      lockend - lockstart + 1);
1446                 if (!ordered)
1447                         break;
1449                 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
1450                 btrfs_start_ordered_extent(inode, ordered, 1);
1451                 btrfs_put_ordered_extent(ordered);
1452         }
1453
1454         num_bytes = lockend - lockstart + 1;
1455         ret = can_nocow_extent(inode, lockstart, &num_bytes, NULL, NULL, NULL);
1456         if (ret <= 0) {
1457                 ret = 0;
1458                 btrfs_end_write_no_snapshoting(root);
1459         } else {
1460                 *write_bytes = min_t(size_t, *write_bytes,
1461                                      num_bytes - pos + lockstart);
1462         }
1463
1464         unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
1465
1466         return ret;
1467 }
1468
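     /*
      * Buffered write path: for each batch of pages, reserve data and
      * metadata space (metadata only for NOCOW writes), prepare and lock
      * the pages and the extent range, copy the user data in and dirty the
      * pages.  A short copy shrinks the batch to a single page, and any
      * space reserved beyond what was actually copied is released.
      */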
1469 static noinline ssize_t __btrfs_buffered_write(struct file *file,
1470                                                struct iov_iter *i,
1471                                                loff_t pos)
1472 {
1473         struct inode *inode = file_inode(file);
1474         struct btrfs_root *root = BTRFS_I(inode)->root;
1475         struct page **pages = NULL;
1476         struct extent_state *cached_state = NULL;
1477         u64 release_bytes = 0;
1478         u64 lockstart;
1479         u64 lockend;
1480         size_t num_written = 0;
1481         int nrptrs;
1482         int ret = 0;
1483         bool only_release_metadata = false;
1484         bool force_page_uptodate = false;
1485         bool need_unlock;
1486
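             /*
              * Size the pages array: enough entries to cover the whole
              * iov_iter but at most one page worth of pointers, further
              * clamped by the dirty-page throttling headroom and floored
              * at 8 entries.
              */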
1487         nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_CACHE_SIZE),
1488                         PAGE_CACHE_SIZE / (sizeof(struct page *)));
1489         nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
1490         nrptrs = max(nrptrs, 8);
1491         pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
1492         if (!pages)
1493                 return -ENOMEM;
1494
1495         while (iov_iter_count(i) > 0) {
1496                 size_t offset = pos & (PAGE_CACHE_SIZE - 1);
1497                 size_t write_bytes = min(iov_iter_count(i),
1498                                          nrptrs * (size_t)PAGE_CACHE_SIZE -
1499                                          offset);
1500                 size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
1501                                                 PAGE_CACHE_SIZE);
1502                 size_t reserve_bytes;
1503                 size_t dirty_pages;
1504                 size_t copied;
1505
1506                 WARN_ON(num_pages > nrptrs);
1507
1508                 /*
1509                  * Fault pages before locking them in prepare_pages
1510                  * to avoid a recursive lock
1511                  */
1512                 if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
1513                         ret = -EFAULT;
1514                         break;
1515                 }
1516
1517                 reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
1518
1519                 if (BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
1520                                              BTRFS_INODE_PREALLOC)) {
1521                         ret = check_can_nocow(inode, pos, &write_bytes);
1522                         if (ret < 0)
1523                                 break;
1524                         if (ret > 0) {
1525                                 /*
1526                                  * For the nodatacow case, no need to reserve
1527                                  * data space.
1528                                  */
1529                                 only_release_metadata = true;
1530                                 /*
1531                                  * our prealloc extent may be smaller than
1532                                  * write_bytes, so scale down.
1533                                  */
1534                                 num_pages = DIV_ROUND_UP(write_bytes + offset,
1535                                                          PAGE_CACHE_SIZE);
1536                                 reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
1537                                 goto reserve_metadata;
1538                         }
1539                 }
1540                 ret = btrfs_check_data_free_space(inode, pos, write_bytes);
1541                 if (ret < 0)
1542                         break;
1543
1544 reserve_metadata:
1545                 ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
1546                 if (ret) {
1547                         if (!only_release_metadata)
1548                                 btrfs_free_reserved_data_space(inode, pos,
1549                                                                write_bytes);
1550                         else
1551                                 btrfs_end_write_no_snapshoting(root);
1552                         break;
1553                 }
1554
1555                 release_bytes = reserve_bytes;
1556                 need_unlock = false;
1557 again:
1558                 /*
1559                  * This is going to set up the pages array with the number of
1560                  * pages we want, so we don't really need to worry about the
1561                  * contents of pages from loop to loop
1562                  */
1563                 ret = prepare_pages(inode, pages, num_pages,
1564                                     pos, write_bytes,
1565                                     force_page_uptodate);
1566                 if (ret)
1567                         break;
1568
1569                 ret = lock_and_cleanup_extent_if_need(inode, pages, num_pages,
1570                                                       pos, &lockstart, &lockend,
1571                                                       &cached_state);
1572                 if (ret < 0) {
1573                         if (ret == -EAGAIN)
1574                                 goto again;
1575                         break;
1576                 } else if (ret > 0) {
1577                         need_unlock = true;
1578                         ret = 0;
1579                 }
1580
1581                 copied = btrfs_copy_from_user(pos, num_pages,
1582                                            write_bytes, pages, i);
1583
1584                 /*
1585                  * if we have trouble faulting in the pages, fall
1586                  * back to one page at a time
1587                  */
1588                 if (copied < write_bytes)
1589                         nrptrs = 1;
1590
1591                 if (copied == 0) {
1592                         force_page_uptodate = true;
1593                         dirty_pages = 0;
1594                 } else {
1595                         force_page_uptodate = false;
1596                         dirty_pages = DIV_ROUND_UP(copied + offset,
1597                                                    PAGE_CACHE_SIZE);
1598                 }
1599
1600                 /*
1601                  * If we had a short copy we need to release the excess delalloc
1602                  * bytes we reserved.  We need to increment outstanding_extents
1603                  * because btrfs_delalloc_release_space will decrement it, but
1604                  * we still have an outstanding extent for the chunk we actually
1605                  * managed to copy.
1606                  */
1607                 if (num_pages > dirty_pages) {
1608                         release_bytes = (num_pages - dirty_pages) <<
1609                                 PAGE_CACHE_SHIFT;
1610                         if (copied > 0) {
1611                                 spin_lock(&BTRFS_I(inode)->lock);
1612                                 BTRFS_I(inode)->outstanding_extents++;
1613                                 spin_unlock(&BTRFS_I(inode)->lock);
1614                         }
1615                         if (only_release_metadata) {
1616                                 btrfs_delalloc_release_metadata(inode,
1617                                                                 release_bytes);
1618                         } else {
1619                                 u64 __pos;
1620
1621                                 __pos = round_down(pos, root->sectorsize) +
1622                                         (dirty_pages << PAGE_CACHE_SHIFT);
1623                                 btrfs_delalloc_release_space(inode, __pos,
1624                                                              release_bytes);
1625                         }
1626                 }
1627
1628                 release_bytes = dirty_pages << PAGE_CACHE_SHIFT;
1629
1630                 if (copied > 0)
1631                         ret = btrfs_dirty_pages(root, inode, pages,
1632                                                 dirty_pages, pos, copied,
1633                                                 NULL);
1634                 if (need_unlock)
1635                         unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1636                                              lockstart, lockend, &cached_state,
1637                                              GFP_NOFS);
1638                 if (ret) {
1639                         btrfs_drop_pages(pages, num_pages);
1640                         break;
1641                 }
1642
1643                 release_bytes = 0;
1644                 if (only_release_metadata)
1645                         btrfs_end_write_no_snapshoting(root);
1646
1647                 if (only_release_metadata && copied > 0) {
1648                         lockstart = round_down(pos, root->sectorsize);
1649                         lockend = lockstart +
1650                                 (dirty_pages << PAGE_CACHE_SHIFT) - 1;
1651
1652                         set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
1653                                        lockend, EXTENT_NORESERVE, NULL,
1654                                        NULL, GFP_NOFS);
1655                         only_release_metadata = false;
1656                 }
1657
1658                 btrfs_drop_pages(pages, num_pages);
1659
1660                 cond_resched();
1661
1662                 balance_dirty_pages_ratelimited(inode->i_mapping);
1663                 if (dirty_pages < (root->nodesize >> PAGE_CACHE_SHIFT) + 1)
1664                         btrfs_btree_balance_dirty(root);
1665
1666                 pos += copied;
1667                 num_written += copied;
1668         }
1669
1670         kfree(pages);
1671
1672         if (release_bytes) {
1673                 if (only_release_metadata) {
1674                         btrfs_end_write_no_snapshoting(root);
1675                         btrfs_delalloc_release_metadata(inode, release_bytes);
1676                 } else {
1677                         btrfs_delalloc_release_space(inode, pos, release_bytes);
1678                 }
1679         }
1680
1681         return num_written ? num_written : ret;
1682 }
1683
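     /*
      * Direct IO write path.  If the direct write does not consume the
      * whole iov_iter, fall back to a buffered write for the remainder,
      * then flush and wait on that range and drop it from the page cache,
      * so that a subsequent direct read sees the data just written.
      */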
1684 static ssize_t __btrfs_direct_write(struct kiocb *iocb,
1685                                     struct iov_iter *from,
1686                                     loff_t pos)
1687 {
1688         struct file *file = iocb->ki_filp;
1689         struct inode *inode = file_inode(file);
1690         ssize_t written;
1691         ssize_t written_buffered;
1692         loff_t endbyte;
1693         int err;
1694
1695         written = generic_file_direct_write(iocb, from, pos);
1696
1697         if (written < 0 || !iov_iter_count(from))
1698                 return written;
1699
1700         pos += written;
1701         written_buffered = __btrfs_buffered_write(file, from, pos);
1702         if (written_buffered < 0) {
1703                 err = written_buffered;
1704                 goto out;
1705         }
1706         /*
1707          * Ensure all data is persisted. We want the next direct IO read to be
1708          * able to read what was just written.
1709          */
1710         endbyte = pos + written_buffered - 1;
1711         err = btrfs_fdatawrite_range(inode, pos, endbyte);
1712         if (err)
1713                 goto out;
1714         err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
1715         if (err)
1716                 goto out;
1717         written += written_buffered;
1718         iocb->ki_pos = pos + written_buffered;
1719         invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
1720                                  endbyte >> PAGE_CACHE_SHIFT);
1721 out:
1722         return written ? written : err;
1723 }
1724
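     /*
      * Update the in-memory timestamps (and i_version) for a write, unless
      * the inode is flagged NOCMTIME.  No transaction is started here; the
      * inode item is updated later, when the written data is persisted (see
      * the comment in btrfs_file_write_iter below).
      */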
1725 static void update_time_for_write(struct inode *inode)
1726 {
1727         struct timespec now;
1728
1729         if (IS_NOCMTIME(inode))
1730                 return;
1731
1732         now = current_fs_time(inode->i_sb);
1733         if (!timespec_equal(&inode->i_mtime, &now))
1734                 inode->i_mtime = now;
1735
1736         if (!timespec_equal(&inode->i_ctime, &now))
1737                 inode->i_ctime = now;
1738
1739         if (IS_I_VERSION(inode))
1740                 inode_inc_iversion(inode);
1741 }
1742
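     /*
      * Entry point for write(2) and friends: do the generic write checks
      * under the inode's i_mutex, expand any hole between i_size and the
      * write position, dispatch to the direct or buffered write path, and
      * finally handle O_DSYNC/O_SYNC semantics through generic_write_sync().
      */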
1743 static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
1744                                     struct iov_iter *from)
1745 {
1746         struct file *file = iocb->ki_filp;
1747         struct inode *inode = file_inode(file);
1748         struct btrfs_root *root = BTRFS_I(inode)->root;
1749         u64 start_pos;
1750         u64 end_pos;
1751         ssize_t num_written = 0;
1752         bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
1753         ssize_t err;
1754         loff_t pos;
1755         size_t count;
1756
1757         mutex_lock(&inode->i_mutex);
1758         err = generic_write_checks(iocb, from);
1759         if (err <= 0) {
1760                 mutex_unlock(&inode->i_mutex);
1761                 return err;
1762         }
1763
1764         current->backing_dev_info = inode_to_bdi(inode);
1765         err = file_remove_privs(file);
1766         if (err) {
1767                 mutex_unlock(&inode->i_mutex);
1768                 goto out;
1769         }
1770
1771         /*
1772          * If BTRFS flips to read-only due to some unexpected error
1773          * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR), we have
1774          * to stop this write operation to ensure consistency, even
1775          * though we have opened the file as writable.
1776          */
1777         if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
1778                 mutex_unlock(&inode->i_mutex);
1779                 err = -EROFS;
1780                 goto out;
1781         }
1782
1783         /*
1784          * We reserve space for updating the inode when we reserve space for the
1785          * extent we are going to write, so any ENOSPC is returned there.  We don't
1786          * need to start yet another transaction to update the inode as we will
1787          * update the inode when we finish writing whatever data we write.
1788          */
1789         update_time_for_write(inode);
1790
1791         pos = iocb->ki_pos;
1792         count = iov_iter_count(from);
1793         start_pos = round_down(pos, root->sectorsize);
1794         if (start_pos > i_size_read(inode)) {
1795                 /* Expand the hole to cover the written data, preventing an empty gap */
1796                 end_pos = round_up(pos + count, root->sectorsize);
1797                 err = btrfs_cont_expand(inode, i_size_read(inode), end_pos);
1798                 if (err) {
1799                         mutex_unlock(&inode->i_mutex);
1800                         goto out;
1801                 }
1802         }
1803
1804         if (sync)
1805                 atomic_inc(&BTRFS_I(inode)->sync_writers);
1806
1807         if (iocb->ki_flags & IOCB_DIRECT) {
1808                 num_written = __btrfs_direct_write(iocb, from, pos);
1809         } else {
1810                 num_written = __btrfs_buffered_write(file, from, pos);
1811                 if (num_written > 0)
1812                         iocb->ki_pos = pos + num_written;
1813         }
1814
1815         mutex_unlock(&inode->i_mutex);
1816
1817         /*
1818          * We also have to set last_sub_trans to the current log transid,
1819          * otherwise subsequent syncs to a file that's been synced in this
1820          * transaction will appear to have already occurred.
1821          */
1822         spin_lock(&BTRFS_I(inode)->lock);
1823         BTRFS_I(inode)->last_sub_trans = root->log_transid;
1824         spin_unlock(&BTRFS_I(inode)->lock);
1825         if (num_written > 0) {
1826                 err = generic_write_sync(file, pos, num_written);
1827                 if (err < 0)
1828                         num_written = err;
1829         }
1830
1831         if (sync)
1832                 atomic_dec(&BTRFS_I(inode)->sync_writers);
1833 out:
1834         current->backing_dev_info = NULL;
1835         return num_written ? num_written : err;
1836 }
1837
1838 int btrfs_release_file(struct inode *inode, struct file *filp)
1839 {
1840         if (filp->private_data)
1841                 btrfs_ioctl_trans_end(filp);
1842         /*
1843          * ordered_data_close is set by setattr when we are about to truncate
1844          * a file from a non-zero size to a zero size.  This tries to
1845          * flush down new bytes that may have been written if the
1846          * application were using truncate to replace a file in place.
1847          */
1848         if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
1849                                &BTRFS_I(inode)->runtime_flags))
1850                 filemap_flush(inode->i_mapping);
1851         return 0;
1852 }
1853
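     /*
      * Kick off writeback for the given range.  sync_writers is elevated
      * around the call so the rest of btrfs can tell that a sync write is
      * in progress (presumably to prefer synchronous submission paths).
      */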
1854 static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
1855 {
1856         int ret;
1857
1858         atomic_inc(&BTRFS_I(inode)->sync_writers);
1859         ret = btrfs_fdatawrite_range(inode, start, end);
1860         atomic_dec(&BTRFS_I(inode)->sync_writers);
1861
1862         return ret;
1863 }
1864
1865 /*
1866  * fsync call for both files and directories.  This logs the inode into
1867  * the tree log instead of forcing full commits whenever possible.
1868  *
1869  * It needs to call filemap_fdatawait so that all ordered extent updates
1870  * in the metadata btree are up to date for copying to the log.
1871  *
1872  * It drops the inode mutex before doing the tree log commit.  This is an
1873  * important optimization for directories because holding the mutex prevents
1874  * new operations on the dir while we write to disk.
1875  */
1876 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1877 {
1878         struct dentry *dentry = file->f_path.dentry;
1879         struct inode *inode = d_inode(dentry);
1880         struct btrfs_root *root = BTRFS_I(inode)->root;
1881         struct btrfs_trans_handle *trans;
1882         struct btrfs_log_ctx ctx;
1883         int ret = 0;
1884         bool full_sync = false;
1885         const u64 len = end - start + 1;
1886
1887         trace_btrfs_sync_file(file, datasync);
1888
1889         /*
1890          * We write the dirty pages in the range and wait until they complete
1891          * outside of the ->i_mutex, so that the dirty pages can be flushed
1892          * by multiple tasks, which improves performance.  See
1893          * btrfs_wait_ordered_range for an explanation of the ASYNC check.
1894          */
1895         ret = start_ordered_ops(inode, start, end);
1896         if (ret)
1897                 return ret;
1898
1899         mutex_lock(&inode->i_mutex);
1900         atomic_inc(&root->log_batch);
1901         full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1902                              &BTRFS_I(inode)->runtime_flags);
1903         /*
1904          * We might have had more pages made dirty after calling
1905          * start_ordered_ops and before acquiring the inode's i_mutex.
1906          */
1907         if (full_sync) {
1908                 /*
1909                  * For a full sync, we need to make sure any ordered operations
1910                  * start and finish before we start logging the inode, so that
1911                  * all extents are persisted and the respective file extent
1912                  * items are in the fs/subvol btree.
1913                  */
1914                 ret = btrfs_wait_ordered_range(inode, start, len);
1915         } else {
1916                 /*
1917                  * Start any new ordered operations before starting to log the
1918                  * inode. We will wait for them to finish in btrfs_sync_log().
1919                  *
1920                  * Right before acquiring the inode's mutex, we might have new
1921                  * writes dirtying pages, which won't immediately start the
1922                  * respective ordered operations - that is done through the
1923                  * fill_delalloc callbacks invoked from the writepage and
1924                  * writepages address space operations. So make sure we start
1925                  * all ordered operations before starting to log our inode. Not
1926                  * doing this means that while logging the inode, writeback
1927                  * could start and invoke writepage/writepages, which would call
1928                  * the fill_delalloc callbacks (cow_file_range,
1929                  * submit_compressed_extents). These callbacks add first an
1930                  * extent map to the modified list of extents and then create
1931                  * the respective ordered operation, which means in
1932                  * tree-log.c:btrfs_log_inode() we might capture all existing
1933                  * ordered operations (with btrfs_get_logged_extents()) before
1934                  * the fill_delalloc callback adds its ordered operation, and by
1935                  * the time we visit the modified list of extent maps (with
1936                  * btrfs_log_changed_extents()), we see and process the extent
1937                  * map they created. We then use the extent map to construct a
1938                  * file extent item for logging without waiting for the
1939                  * respective ordered operation to finish - this file extent
1940                  * item points to a disk location that might not have yet been
1941                  * written to, containing random data - so after a crash a log
1942                  * replay will make our inode have file extent items that point
1943                  * to disk locations containing invalid data, as we returned
1944                  * success to userspace without waiting for the respective
1945                  * ordered operation to finish, because it wasn't captured by
1946                  * btrfs_get_logged_extents().
1947                  */
1948                 ret = start_ordered_ops(inode, start, end);
1949         }
1950         if (ret) {
1951                 mutex_unlock(&inode->i_mutex);
1952                 goto out;
1953         }
1954         atomic_inc(&root->log_batch);
1955
1956         /*
1957          * If the last transaction that changed this file was before the current
1958          * transaction and we have the full sync flag set in our inode, we can
1959          * bail out now without any syncing.
1960          *
1961          * Note that we can't bail out if the full sync flag isn't set. This is
1962          * because when the full sync flag is set we start all ordered extents
1963          * and wait for them to fully complete - when they complete they update
1964          * the inode's last_trans field through:
1965          *
1966          *     btrfs_finish_ordered_io() ->
1967          *         btrfs_update_inode_fallback() ->
1968          *             btrfs_update_inode() ->
1969          *                 btrfs_set_inode_last_trans()
1970          *
1971          * So we are sure that last_trans is up to date and can do this check to
1972          * bail out safely. For the fast path, when the full sync flag is not
1973          * set in our inode, we cannot do it because we start only our ordered
1974          * extents and don't wait for them to complete (that is when
1975          * btrfs_finish_ordered_io runs), so here at this point their last_trans
1976          * value might be less than or equals to fs_info->last_trans_committed,
1977          * and setting a speculative last_trans for an inode when a buffered
1978          * write is made (such as fs_info->generation + 1) would not
1979          * be reliable since after setting the value and before fsync is called
1980          * any number of transactions can start and commit (transaction kthread
1981          * commits the current transaction periodically), and a transaction
1982          * commit does not start nor waits for ordered extents to complete.
1983          * commit does not start nor wait for ordered extents to complete.
1984         smp_mb();
1985         if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
1986             (BTRFS_I(inode)->last_trans <=
1987              root->fs_info->last_trans_committed &&
1988              (full_sync ||
1989               !btrfs_have_ordered_extents_in_range(inode, start, len)))) {
1990                 /*
1991                  * We've had everything committed since the last time we were
1992                  * modified so clear this flag in case it was set for whatever
1993                  * reason, it's no longer relevant.
1994                  */
1995                 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1996                           &BTRFS_I(inode)->runtime_flags);
1997                 mutex_unlock(&inode->i_mutex);
1998                 goto out;
1999         }
2000
2001         /*
2002          * OK, we haven't committed the transaction yet, let's do a commit
2003          */
2004         if (file->private_data)
2005                 btrfs_ioctl_trans_end(file);
2006
2007         /*
2008          * We use start here because we will need to wait on the IO to complete
2009          * in btrfs_sync_log, which could require joining a transaction (for
2010          * example checking cross references in the nocow path).  If we use join
2011          * here we could get into a situation where we're waiting on IO to
2012          * happen that is blocked on a transaction trying to commit.  With start
2013          * we inc the extwriter counter, so we wait for all extwriters to exit
2014          * before we start blocking join'ers.  This comment is to keep somebody
2015          * from thinking they are super smart and changing this to
2016          * btrfs_join_transaction *cough*Josef*cough*.
2017          */
2018         trans = btrfs_start_transaction(root, 0);
2019         if (IS_ERR(trans)) {
2020                 ret = PTR_ERR(trans);
2021                 mutex_unlock(&inode->i_mutex);
2022                 goto out;
2023         }
2024         trans->sync = true;
2025
2026         btrfs_init_log_ctx(&ctx);
2027
2028         ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx);
2029         if (ret < 0) {
2030                 /* Fallthrough and commit/free transaction. */
2031                 ret = 1;
2032         }
2033
2034         /* we've logged all the items and now have a consistent
2035          * version of the file in the log.  It is possible that
2036          * someone will come in and modify the file, but that's
2037          * fine because the log is consistent on disk, and we
2038          * have references to all of the file's extents
2039          *
2040          * It is possible that someone will come in and log the
2041          * file again, but that will end up using the synchronization
2042          * inside btrfs_sync_log to keep things safe.
2043          */
2044         mutex_unlock(&inode->i_mutex);
2045
2046         /*
2047          * If any of the ordered extents had an error, just return it to user
2048          * space, so that the application knows some writes didn't succeed and
2049          * can take proper action (e.g. retry). Blindly committing the
2050          * transaction in this case would fool userspace into thinking everything was
2051          * successful. And we also want to make sure our log doesn't contain
2052          * file extent items pointing to extents that weren't fully written to -
2053          * just like in the non fast fsync path, where we check for the ordered
2054          * operation's error flag before writing to the log tree and return -EIO
2055          * if any of them had this flag set (btrfs_wait_ordered_range) -
2056          * therefore we need to check for errors in the ordered operations,
2057          * which are indicated by ctx.io_err.
2058          */
2059         if (ctx.io_err) {
2060                 btrfs_end_transaction(trans, root);
2061                 ret = ctx.io_err;
2062                 goto out;
2063         }
2064
2065         if (ret != BTRFS_NO_LOG_SYNC) {
2066                 if (!ret) {
2067                         ret = btrfs_sync_log(trans, root, &ctx);
2068                         if (!ret) {
2069                                 ret = btrfs_end_transaction(trans, root);
2070                                 goto out;
2071                         }
2072                 }
2073                 if (!full_sync) {
2074                         ret = btrfs_wait_ordered_range(inode, start,
2075                                                        end - start + 1);
2076                         if (ret) {
2077                                 btrfs_end_transaction(trans, root);
2078                                 goto out;
2079                         }
2080                 }
2081                 ret = btrfs_commit_transaction(trans, root);
2082         } else {
2083                 ret = btrfs_end_transaction(trans, root);
2084         }
2085 out:
2086         return ret > 0 ? -EIO : ret;
2087 }
2088
2089 static const struct vm_operations_struct btrfs_file_vm_ops = {
2090         .fault          = filemap_fault,
2091         .map_pages      = filemap_map_pages,
2092         .page_mkwrite   = btrfs_page_mkwrite,
2093 };
2094
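     /*
      * mmap() hook: reads are serviced from the page cache via
      * filemap_fault, while writable faults go through btrfs_page_mkwrite
      * so that space is reserved and delalloc is set up for the page.
      */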
2095 static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
2096 {
2097         struct address_space *mapping = filp->f_mapping;
2098
2099         if (!mapping->a_ops->readpage)
2100                 return -ENOEXEC;
2101
2102         file_accessed(filp);
2103         vma->vm_ops = &btrfs_file_vm_ops;
2104
2105         return 0;
2106 }
2107
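     /*
      * Returns 1 if the item at @slot is a hole file extent (a regular
      * extent with a zero disk_bytenr) that either starts at @end or ends
      * at @start, i.e. a hole that the range [start, end) can be merged
      * with, and 0 otherwise.
      */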
2108 static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
2109                           int slot, u64 start, u64 end)
2110 {
2111         struct btrfs_file_extent_item *fi;
2112         struct btrfs_key key;
2113
2114         if (slot < 0 || slot >= btrfs_header_nritems(leaf))
2115                 return 0;
2116
2117         btrfs_item_key_to_cpu(leaf, &key, slot);
2118         if (key.objectid != btrfs_ino(inode) ||
2119             key.type != BTRFS_EXTENT_DATA_KEY)
2120                 return 0;
2121
2122         fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2123
2124         if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2125                 return 0;
2126
2127         if (btrfs_file_extent_disk_bytenr(leaf, fi))
2128                 return 0;
2129
2130         if (key.offset == end)
2131                 return 1;
2132         if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
2133                 return 1;
2134         return 0;
2135 }
2136
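     /*
      * Fill the hole [offset, end) left by a punch: extend an adjacent hole
      * file extent item if one exists, otherwise insert a new one (no file
      * extent item is needed when the NO_HOLES feature is enabled).  In all
      * cases insert a matching hole extent map so the fast fsync path sees
      * the hole; if that fails, flag the inode for a full sync instead.
      */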
2137 static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
2138                       struct btrfs_path *path, u64 offset, u64 end)
2139 {
2140         struct btrfs_root *root = BTRFS_I(inode)->root;
2141         struct extent_buffer *leaf;
2142         struct btrfs_file_extent_item *fi;
2143         struct extent_map *hole_em;
2144         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2145         struct btrfs_key key;
2146         int ret;
2147
2148         if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
2149                 goto out;
2150
2151         key.objectid = btrfs_ino(inode);
2152         key.type = BTRFS_EXTENT_DATA_KEY;
2153         key.offset = offset;
2154
2155         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2156         if (ret < 0)
2157                 return ret;
2158         BUG_ON(!ret);
2159
2160         leaf = path->nodes[0];
2161         if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
2162                 u64 num_bytes;
2163
2164                 path->slots[0]--;
2165                 fi = btrfs_item_ptr(leaf, path->slots[0],
2166                                     struct btrfs_file_extent_item);
2167                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2168                         end - offset;
2169                 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2170                 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2171                 btrfs_set_file_extent_offset(leaf, fi, 0);
2172                 btrfs_mark_buffer_dirty(leaf);
2173                 goto out;
2174         }
2175
2176         if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
2177                 u64 num_bytes;
2178
2179                 key.offset = offset;
2180                 btrfs_set_item_key_safe(root->fs_info, path, &key);
2181                 fi = btrfs_item_ptr(leaf, path->slots[0],
2182                                     struct btrfs_file_extent_item);
2183                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2184                         offset;
2185                 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2186                 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2187                 btrfs_set_file_extent_offset(leaf, fi, 0);
2188                 btrfs_mark_buffer_dirty(leaf);
2189                 goto out;
2190         }
2191         btrfs_release_path(path);
2192
2193         ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
2194                                        0, 0, end - offset, 0, end - offset,
2195                                        0, 0, 0);
2196         if (ret)
2197                 return ret;
2198
2199 out:
2200         btrfs_release_path(path);
2201
2202         hole_em = alloc_extent_map();
2203         if (!hole_em) {
2204                 btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2205                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2206                         &BTRFS_I(inode)->runtime_flags);
2207         } else {
2208                 hole_em->start = offset;
2209                 hole_em->len = end - offset;
2210                 hole_em->ram_bytes = hole_em->len;
2211                 hole_em->orig_start = offset;
2212
2213                 hole_em->block_start = EXTENT_MAP_HOLE;
2214                 hole_em->block_len = 0;
2215                 hole_em->orig_block_len = 0;
2216                 hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
2217                 hole_em->compress_type = BTRFS_COMPRESS_NONE;
2218                 hole_em->generation = trans->transid;
2219
2220                 do {
2221                         btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2222                         write_lock(&em_tree->lock);
2223                         ret = add_extent_mapping(em_tree, hole_em, 1);
2224                         write_unlock(&em_tree->lock);
2225                 } while (ret == -EEXIST);
2226                 free_extent_map(hole_em);
2227                 if (ret)
2228                         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2229                                 &BTRFS_I(inode)->runtime_flags);
2230         }
2231
2232         return 0;
2233 }
2234
2235 /*
2236  * Find a hole extent on the given inode and change start/len to the end of
2237  * the hole extent (a hole/vacuum extent whose em->start <= start &&
2238  * em->start + em->len > start).
2239  * When a hole extent is found, return 1 and modify start/len.
2240  */
2241 static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
2242 {
2243         struct extent_map *em;
2244         int ret = 0;
2245
2246         em = btrfs_get_extent(inode, NULL, 0, *start, *len, 0);
2247         if (IS_ERR_OR_NULL(em)) {
2248                 if (!em)
2249                         ret = -ENOMEM;
2250                 else
2251                         ret = PTR_ERR(em);
2252                 return ret;
2253         }
2254
2255         /* Hole or vacuum extent (the latter only exists in no-holes mode) */
2256         if (em->block_start == EXTENT_MAP_HOLE) {
2257                 ret = 1;
2258                 *len = em->start + em->len > *start + *len ?
2259                        0 : *start + *len - em->start - em->len;
2260                 *start = em->start + em->len;
2261         }
2262         free_extent_map(em);
2263         return ret;
2264 }
2265
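     /*
      * Punch a hole in the range [offset, offset + len): zero the partial
      * blocks at the edges in place, then drop the extents covering the
      * block-aligned middle and replace them with hole extent items via
      * fill_holes(), using a series of small transactions so that a single
      * huge transaction is never needed.
      *
      * For example, punching 10K at offset 1K on a filesystem with 4K
      * sectors and pages zeroes [1K, 4K) and [8K, 11K) in place and drops
      * only the fully covered block range [4K, 8K).
      */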
2266 static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
2267 {
2268         struct btrfs_root *root = BTRFS_I(inode)->root;
2269         struct extent_state *cached_state = NULL;
2270         struct btrfs_path *path;
2271         struct btrfs_block_rsv *rsv;
2272         struct btrfs_trans_handle *trans;
2273         u64 lockstart;
2274         u64 lockend;
2275         u64 tail_start;
2276         u64 tail_len;
2277         u64 orig_start = offset;
2278         u64 cur_offset;
2279         u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
2280         u64 drop_end;
2281         int ret = 0;
2282         int err = 0;
2283         unsigned int rsv_count;
2284         bool same_page;
2285         bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);
2286         u64 ino_size;
2287         bool truncated_page = false;
2288         bool updated_inode = false;
2289
2290         ret = btrfs_wait_ordered_range(inode, offset, len);
2291         if (ret)
2292                 return ret;
2293
2294         mutex_lock(&inode->i_mutex);
2295         ino_size = round_up(inode->i_size, PAGE_CACHE_SIZE);
2296         ret = find_first_non_hole(inode, &offset, &len);
2297         if (ret < 0)
2298                 goto out_only_mutex;
2299         if (ret && !len) {
2300                 /* Already in a large hole */
2301                 ret = 0;
2302                 goto out_only_mutex;
2303         }
2304
2305         lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize);
2306         lockend = round_down(offset + len,
2307                              BTRFS_I(inode)->root->sectorsize) - 1;
2308         same_page = ((offset >> PAGE_CACHE_SHIFT) ==
2309                     ((offset + len - 1) >> PAGE_CACHE_SHIFT));
2310
2311         /*
2312          * We needn't truncate any page which is beyond the end of the file
2313          * because we are sure there is no data there.
2314          *
2315          * Only do this if we are in the same page and we aren't doing the
2316          * entire page.
2317          */
2319         if (same_page && len < PAGE_CACHE_SIZE) {
2320                 if (offset < ino_size) {
2321                         truncated_page = true;
2322                         ret = btrfs_truncate_page(inode, offset, len, 0);
2323                 } else {
2324                         ret = 0;
2325                 }
2326                 goto out_only_mutex;
2327         }
2328
2329         /* zero back part of the first page */
2330         if (offset < ino_size) {
2331                 truncated_page = true;
2332                 ret = btrfs_truncate_page(inode, offset, 0, 0);
2333                 if (ret) {
2334                         mutex_unlock(&inode->i_mutex);
2335                         return ret;
2336                 }
2337         }
2338
2339         /* Check the aligned pages after the first unaligned page;
2340          * if offset != orig_start, the first unaligned page and
2341          * several following pages are already in holes, so the
2342          * extra check can be skipped. */
2343         if (offset == orig_start) {
2344                 /* after truncate page, check hole again */
2345                 len = offset + len - lockstart;
2346                 offset = lockstart;
2347                 ret = find_first_non_hole(inode, &offset, &len);
2348                 if (ret < 0)
2349                         goto out_only_mutex;
2350                 if (ret && !len) {
2351                         ret = 0;
2352                         goto out_only_mutex;
2353                 }
2354                 lockstart = offset;
2355         }
2356
2357         /* Check whether the unaligned tail part is in a hole */
2358         tail_start = lockend + 1;
2359         tail_len = offset + len - tail_start;
2360         if (tail_len) {
2361                 ret = find_first_non_hole(inode, &tail_start, &tail_len);
2362                 if (unlikely(ret < 0))
2363                         goto out_only_mutex;
2364                 if (!ret) {
2365                         /* zero the front end of the last page */
2366                         if (tail_start + tail_len < ino_size) {
2367                                 truncated_page = true;
2368                                 ret = btrfs_truncate_page(inode,
2369                                                 tail_start + tail_len, 0, 1);
2370                                 if (ret)
2371                                         goto out_only_mutex;
2372                         }
2373                 }
2374         }
2375
2376         if (lockend < lockstart) {
2377                 ret = 0;
2378                 goto out_only_mutex;
2379         }
2380
2381         while (1) {
2382                 struct btrfs_ordered_extent *ordered;
2383
2384                 truncate_pagecache_range(inode, lockstart, lockend);
2385
2386                 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2387                                  0, &cached_state);
2388                 ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
2389
2390                 /*
2391                  * We need to make sure we have no ordered extents in this range
2392                  * and that nobody raced in and read a page in this range; if
2393                  * anyone did, we need to try again.
2394                  */
2395                 if ((!ordered ||
2396                     (ordered->file_offset + ordered->len <= lockstart ||
2397                      ordered->file_offset > lockend)) &&
2398                      !btrfs_page_exists_in_range(inode, lockstart, lockend)) {
2399                         if (ordered)
2400                                 btrfs_put_ordered_extent(ordered);
2401                         break;
2402                 }
2403                 if (ordered)
2404                         btrfs_put_ordered_extent(ordered);
2405                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
2406                                      lockend, &cached_state, GFP_NOFS);
2407                 ret = btrfs_wait_ordered_range(inode, lockstart,
2408                                                lockend - lockstart + 1);
2409                 if (ret) {
2410                         mutex_unlock(&inode->i_mutex);
2411                         return ret;
2412                 }
2413         }
2414
2415         path = btrfs_alloc_path();
2416         if (!path) {
2417                 ret = -ENOMEM;
2418                 goto out;
2419         }
2420
2421         rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
2422         if (!rsv) {
2423                 ret = -ENOMEM;
2424                 goto out_free;
2425         }
2426         rsv->size = btrfs_calc_trunc_metadata_size(root, 1);
2427         rsv->failfast = 1;
2428
2429         /*
2430          * 1 - update the inode
2431          * 1 - removing the extents in the range
2432          * 1 - adding the hole extent if no_holes isn't set
2433          */
2434         rsv_count = no_holes ? 2 : 3;
2435         trans = btrfs_start_transaction(root, rsv_count);
2436         if (IS_ERR(trans)) {
2437                 err = PTR_ERR(trans);
2438                 goto out_free;
2439         }
2440
2441         ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
2442                                       min_size);
2443         BUG_ON(ret);
2444         trans->block_rsv = rsv;
2445
2446         cur_offset = lockstart;
2447         len = lockend - cur_offset;
2448         while (cur_offset < lockend) {
2449                 ret = __btrfs_drop_extents(trans, root, inode, path,
2450                                            cur_offset, lockend + 1,
2451                                            &drop_end, 1, 0, 0, NULL);
2452                 if (ret != -ENOSPC)
2453                         break;
2454
2455                 trans->block_rsv = &root->fs_info->trans_block_rsv;
2456
2457                 if (cur_offset < ino_size) {
2458                         ret = fill_holes(trans, inode, path, cur_offset,
2459                                          drop_end);
2460                         if (ret) {
2461                                 err = ret;
2462                                 break;
2463                         }
2464                 }
2465
2466                 cur_offset = drop_end;
2467
2468                 ret = btrfs_update_inode(trans, root, inode);
2469                 if (ret) {
2470                         err = ret;
2471                         break;
2472                 }
2473
2474                 btrfs_end_transaction(trans, root);
2475                 btrfs_btree_balance_dirty(root);
2476
2477                 trans = btrfs_start_transaction(root, rsv_count);
2478                 if (IS_ERR(trans)) {
2479                         ret = PTR_ERR(trans);
2480                         trans = NULL;
2481                         break;
2482                 }
2483
2484                 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
2485                                               rsv, min_size);
2486                 BUG_ON(ret);    /* shouldn't happen */
2487                 trans->block_rsv = rsv;
2488
2489                 ret = find_first_non_hole(inode, &cur_offset, &len);
2490                 if (unlikely(ret < 0))
2491                         break;
2492                 if (ret && !len) {
2493                         ret = 0;
2494                         break;
2495                 }
2496         }
2497
2498         if (ret) {
2499                 err = ret;
2500                 goto out_trans;
2501         }
2502
2503         trans->block_rsv = &root->fs_info->trans_block_rsv;
2504         /*
2505          * If we are using the NO_HOLES feature we might already have had a
2506          * hole that overlaps a part of the region [lockstart, lockend] and
2507          * ends at (or beyond) lockend. Since we have no file extent items to
2508          * represent holes, drop_end can be less than lockend and so we must
2509          * make sure we have an extent map representing the existing hole (the
2510          * call to __btrfs_drop_extents() might have dropped the existing extent
2511          * map representing the existing hole), otherwise the fast fsync path
2512          * will not record the existence of the hole region
2513          * [existing_hole_start, lockend].
2514          */
2515         if (drop_end <= lockend)
2516                 drop_end = lockend + 1;
2517         /*
2518          * Don't insert file hole extent item if it's for a range beyond eof
2519          * (because it's useless) or if it represents a zero-byte range (when
2520          * cur_offset == drop_end).
2521          */
2522         if (cur_offset < ino_size && cur_offset < drop_end) {
2523                 ret = fill_holes(trans, inode, path, cur_offset, drop_end);
2524                 if (ret) {
2525                         err = ret;
2526                         goto out_trans;
2527                 }
2528         }
2529
2530 out_trans:
2531         if (!trans)
2532                 goto out_free;
2533
2534         inode_inc_iversion(inode);
2535         inode->i_mtime = inode->i_ctime = CURRENT_TIME;
2536
2537         trans->block_rsv = &root->fs_info->trans_block_rsv;
2538         ret = btrfs_update_inode(trans, root, inode);
2539         updated_inode = true;
2540         btrfs_end_transaction(trans, root);
2541         btrfs_btree_balance_dirty(root);
2542 out_free:
2543         btrfs_free_path(path);
2544         btrfs_free_block_rsv(root, rsv);
2545 out:
2546         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2547                              &cached_state, GFP_NOFS);
2548 out_only_mutex:
2549         if (!updated_inode && truncated_page && !ret && !err) {
2550                 /*
2551                  * If we only end up zeroing part of a page, we still need to
2552                  * update the inode item, so that all the time fields are
2553                  * updated, as well as the in-memory btrfs inode fields needed
2554                  * to detect, at fsync time, whether the inode isn't yet in the
2555                  * log tree or is there but not up to date.
2556                  */
2557                 trans = btrfs_start_transaction(root, 1);
2558                 if (IS_ERR(trans)) {
2559                         err = PTR_ERR(trans);
2560                 } else {
2561                         err = btrfs_update_inode(trans, root, inode);
2562                         ret = btrfs_end_transaction(trans, root);
2563                 }
2564         }
2565         mutex_unlock(&inode->i_mutex);
2566         if (ret && !err)
2567                 err = ret;
2568         return err;
2569 }
2570
2571 /* Helper structure to record which range is already reserved */
2572 struct falloc_range {
2573         struct list_head list;
2574         u64 start;
2575         u64 len;
2576 };
2577
2578 /*
2579  * Helper function to add falloc range
2580  *
2581  * Caller should have locked the larger extent range containing
2582  * [start, start + len)
2583  */
2584 static int add_falloc_range(struct list_head *head, u64 start, u64 len)
2585 {
2586         struct falloc_range *prev = NULL;
2587         struct falloc_range *range = NULL;
2588
2589         if (list_empty(head))
2590                 goto insert;
2591
2592         /*
2593          * As fallocate iterates in bytenr order, we only need to check
2594          * the last range.
2595          */
2596         prev = list_entry(head->prev, struct falloc_range, list);
2597         if (prev->start + prev->len == start) {
2598                 prev->len += len;
2599                 return 0;
2600         }
2601 insert:
2602         range = kmalloc(sizeof(*range), GFP_NOFS);
2603         if (!range)
2604                 return -ENOMEM;
2605         range->start = start;
2606         range->len = len;
2607         list_add_tail(&range->list, head);
2608         return 0;
2609 }
2610
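     /*
      * fallocate() hook.  FALLOC_FL_PUNCH_HOLE is handed off to
      * btrfs_punch_hole(); plain preallocation (optionally with
      * FALLOC_FL_KEEP_SIZE) reserves the block-aligned range up front and
      * then walks it, recording what is already reserved and allocating
      * extents for the rest.
      */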
2611 static long btrfs_fallocate(struct file *file, int mode,
2612                             loff_t offset, loff_t len)
2613 {
2614         struct inode *inode = file_inode(file);
2615         struct extent_state *cached_state = NULL;
2616         struct falloc_range *range;
2617         struct falloc_range *tmp;
2618         struct list_head reserve_list;
2619         u64 cur_offset;
2620         u64 last_byte;
2621         u64 alloc_start;
2622         u64 alloc_end;
2623         u64 alloc_hint = 0;
2624         u64 locked_end;
2625         u64 actual_end = 0;
2626         struct extent_map *em;
2627         int blocksize = BTRFS_I(inode)->root->sectorsize;
2628         int ret;
2629
2630         alloc_start = round_down(offset, blocksize);
2631         alloc_end = round_up(offset + len, blocksize);
2632
2633         /* Make sure we aren't being given some crap mode */
2634         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2635                 return -EOPNOTSUPP;
2636
2637         if (mode & FALLOC_FL_PUNCH_HOLE)
2638                 return btrfs_punch_hole(inode, offset, len);
2639
2640         /*
2641          * Only trigger the disk allocation here, don't trigger the
2642          * qgroup reserve yet.
2643          * The qgroup space is checked and reserved later, range by range.
2644          */
2645         ret = btrfs_alloc_data_chunk_ondemand(inode, alloc_end - alloc_start);
2646         if (ret < 0)
2647                 return ret;
2648
2649         mutex_lock(&inode->i_mutex);
2650         ret = inode_newsize_ok(inode, alloc_end);
2651         if (ret)
2652                 goto out;
2653
2654         /*
2655          * TODO: Move these two operations after we have checked the
2656          * accurate reserved space, or fallocate can still fail with the
2657          * page already truncated or the size already expanded.
2658          *
2659          * That's a minor problem, though, and won't do much harm.
2660          */
2661         if (alloc_start > inode->i_size) {
2662                 ret = btrfs_cont_expand(inode, i_size_read(inode),
2663                                         alloc_start);
2664                 if (ret)
2665                         goto out;
2666         } else if (offset + len > inode->i_size) {
2667                 /*
2668                  * If we are fallocating from the end of the file onward we
2669                  * need to zero out the end of the page if i_size lands in the
2670                  * middle of a page.
2671                  */
2672                 ret = btrfs_truncate_page(inode, inode->i_size, 0, 0);
2673                 if (ret)
2674                         goto out;
2675         }
2676
2677         /*
2678          * wait for ordered IO before we have any locks.  We'll loop again
2679          * below with the locks held.
2680          */
2681         ret = btrfs_wait_ordered_range(inode, alloc_start,
2682                                        alloc_end - alloc_start);
2683         if (ret)
2684                 goto out;
2685
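             /*
              * Lock the whole target range, but retry if an ordered extent
              * raced in after the flush above: waiting for ordered IO has
              * to happen without the extent lock held (see the comments
              * inside the loop).
              */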
2686         locked_end = alloc_end - 1;
2687         while (1) {
2688                 struct btrfs_ordered_extent *ordered;
2689
2690                 /* the extent lock is ordered inside the running
2691                  * transaction
2692                  */
2693                 lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
2694                                  locked_end, 0, &cached_state);
2695                 ordered = btrfs_lookup_first_ordered_extent(inode,
2696                                                             alloc_end - 1);
2697                 if (ordered &&
2698                     ordered->file_offset + ordered->len > alloc_start &&
2699                     ordered->file_offset < alloc_end) {
2700                         btrfs_put_ordered_extent(ordered);
2701                         unlock_extent_cached(&BTRFS_I(inode)->io_tree,
2702                                              alloc_start, locked_end,
2703                                              &cached_state, GFP_NOFS);
2704                         /*
2705                          * we can't wait on the range with the transaction
2706                          * running or with the extent lock held
2707                          */
2708                         ret = btrfs_wait_ordered_range(inode, alloc_start,
2709                                                        alloc_end - alloc_start);
2710                         if (ret)
2711                                 goto out;
2712                 } else {
2713                         if (ordered)
2714                                 btrfs_put_ordered_extent(ordered);
2715                         break;
2716                 }
2717         }
2718
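             /*
              * This works in two passes: first walk the extent maps and
              * build up reserve_list with only the ranges that actually
              * need allocation, i.e. holes or non-preallocated space
              * beyond i_size (reserving qgroup space as we go), then do
              * the real preallocation in a second loop over that list.
              */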
2719         /* First, check if we exceed the qgroup limit */
2720         INIT_LIST_HEAD(&reserve_list);
2721         cur_offset = alloc_start;
2722         while (1) {
2723                 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2724                                       alloc_end - cur_offset, 0);
2725                 if (IS_ERR_OR_NULL(em)) {
2726                         if (!em)
2727                                 ret = -ENOMEM;
2728                         else
2729                                 ret = PTR_ERR(em);
2730                         break;
2731                 }
2732                 last_byte = min(extent_map_end(em), alloc_end);
2733                 actual_end = min_t(u64, extent_map_end(em), offset + len);
2734                 last_byte = ALIGN(last_byte, blocksize);
2735                 if (em->block_start == EXTENT_MAP_HOLE ||
2736                     (cur_offset >= inode->i_size &&
2737                      !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
2738                         ret = add_falloc_range(&reserve_list, cur_offset,
2739                                                last_byte - cur_offset);
2740                         if (ret < 0) {
2741                                 free_extent_map(em);
2742                                 break;
2743                         }
2744                         ret = btrfs_qgroup_reserve_data(inode, cur_offset,
2745                                         last_byte - cur_offset);
2746                         if (ret < 0)
2747                                 break;
2748                 }
2749                 free_extent_map(em);
2750                 cur_offset = last_byte;
2751                 if (cur_offset >= alloc_end)
2752                         break;
2753         }
2754
2755         /*
2756          * If ret is still 0, we're OK to fallocate.
2757          * Otherwise, just clean up the list and exit.
2758          */
2759         list_for_each_entry_safe(range, tmp, &reserve_list, list) {
2760                 if (!ret)
2761                         ret = btrfs_prealloc_file_range(inode, mode,
2762                                         range->start,
2763                                         range->len, 1 << inode->i_blkbits,
2764                                         offset + len, &alloc_hint);
2765                 list_del(&range->list);
2766                 kfree(range);
2767         }
2768         if (ret < 0)
2769                 goto out_unlock;
2770
2771         if (actual_end > inode->i_size &&
2772             !(mode & FALLOC_FL_KEEP_SIZE)) {
2773                 struct btrfs_trans_handle *trans;
2774                 struct btrfs_root *root = BTRFS_I(inode)->root;
2775
2776                 /*
2777                  * We didn't need to allocate any more space, but we
2778                  * still extended the size of the file so we need to
2779                  * update i_size and the inode item.
2780                  */
2781                 trans = btrfs_start_transaction(root, 1);
2782                 if (IS_ERR(trans)) {
2783                         ret = PTR_ERR(trans);
2784                 } else {
2785                         inode->i_ctime = CURRENT_TIME;
2786                         i_size_write(inode, actual_end);
2787                         btrfs_ordered_update_i_size(inode, actual_end, NULL);
2788                         ret = btrfs_update_inode(trans, root, inode);
2789                         if (ret)
2790                                 btrfs_end_transaction(trans, root);
2791                         else
2792                                 ret = btrfs_end_transaction(trans, root);
2793                 }
2794         }
2795 out_unlock:
2796         unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
2797                              &cached_state, GFP_NOFS);
2798 out:
2799         /*
2800          * As we have waited on the extent range, the data_rsv_map must
2801          * be empty in the range: a written data range is released from it
2802          * at write time, and a preallocated extent is likewise released
2803          * once its metadata is written.
2804          * So this call is purely cleanup.
2805          */
2806         btrfs_qgroup_free_data(inode, alloc_start, alloc_end - alloc_start);
2807         mutex_unlock(&inode->i_mutex);
2808         /* Let go of our reservation. */
2809         btrfs_free_reserved_data_space(inode, alloc_start,
2810                                        alloc_end - alloc_start);
2811         return ret;
2812 }
2813
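     /*
      * Find the start of the next data or hole region for SEEK_DATA /
      * SEEK_HOLE, beginning at *offset, and store the result back into
      * *offset.  Unwritten (preallocated) extents are treated as holes.
      */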
2814 static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
2815 {
2816         struct btrfs_root *root = BTRFS_I(inode)->root;
2817         struct extent_map *em = NULL;
2818         struct extent_state *cached_state = NULL;
2819         u64 lockstart;
2820         u64 lockend;
2821         u64 start;
2822         u64 len;
2823         int ret = 0;
2824
2825         if (inode->i_size == 0)
2826                 return -ENXIO;
2827
2828         /*
2829          * *offset can be negative; in that case we start looking for
2830          * DATA/HOLE from the very start of the file.
2831          */
2832         start = max_t(loff_t, 0, *offset);
2833
2834         lockstart = round_down(start, root->sectorsize);
2835         lockend = round_up(i_size_read(inode), root->sectorsize);
2836         if (lockend <= lockstart)
2837                 lockend = lockstart + root->sectorsize;
2838         lockend--;
2839         len = lockend - lockstart + 1;
2840
2841         lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
2842                          &cached_state);
2843
2844         while (start < inode->i_size) {
2845                 em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
2846                 if (IS_ERR(em)) {
2847                         ret = PTR_ERR(em);
2848                         em = NULL;
2849                         break;
2850                 }
2851
2852                 if (whence == SEEK_HOLE &&
2853                     (em->block_start == EXTENT_MAP_HOLE ||
2854                      test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
2855                         break;
2856                 else if (whence == SEEK_DATA &&
2857                            (em->block_start != EXTENT_MAP_HOLE &&
2858                             !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
2859                         break;
2860
2861                 start = em->start + em->len;
2862                 free_extent_map(em);
2863                 em = NULL;
2864                 cond_resched();
2865         }
2866         free_extent_map(em);
2867         if (!ret) {
2868                 if (whence == SEEK_DATA && start >= inode->i_size)
2869                         ret = -ENXIO;
2870                 else
2871                         *offset = min_t(loff_t, start, inode->i_size);
2872         }
2873         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2874                              &cached_state, GFP_NOFS);
2875         return ret;
2876 }
2877
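     /*
      * llseek for btrfs files.  SEEK_END and SEEK_CUR go through the
      * generic helper, SEEK_SET falls through to vfs_setpos(), and
      * SEEK_DATA/SEEK_HOLE walk the extent maps via find_desired_extent()
      * above.
      *
      * A userspace sketch of the extent-aware modes (plain lseek(2)
      * calls, not part of this file):
      *
      *     off_t data = lseek(fd, 0, SEEK_DATA);     first data byte at/after 0
      *     off_t hole = lseek(fd, data, SEEK_HOLE);  end of that data region
      */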
2878 static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
2879 {
2880         struct inode *inode = file->f_mapping->host;
2881         int ret;
2882
2883         mutex_lock(&inode->i_mutex);
2884         switch (whence) {
2885         case SEEK_END:
2886         case SEEK_CUR:
2887                 offset = generic_file_llseek(file, offset, whence);
2888                 goto out;
2889         case SEEK_DATA:
2890         case SEEK_HOLE:
2891                 if (offset >= i_size_read(inode)) {
2892                         mutex_unlock(&inode->i_mutex);
2893                         return -ENXIO;
2894                 }
2895
2896                 ret = find_desired_extent(inode, &offset, whence);
2897                 if (ret) {
2898                         mutex_unlock(&inode->i_mutex);
2899                         return ret;
2900                 }
2901         }
2902
2903         offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
2904 out:
2905         mutex_unlock(&inode->i_mutex);
2906         return offset;
2907 }
2908
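     /*
      * The file operations for regular btrfs files.  Reads use the
      * generic page-cache paths; writes, fsync, mmap, llseek and
      * fallocate use the btrfs-specific implementations in this file.
      */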
2909 const struct file_operations btrfs_file_operations = {
2910         .llseek         = btrfs_file_llseek,
2911         .read_iter      = generic_file_read_iter,
2912         .splice_read    = generic_file_splice_read,
2913         .write_iter     = btrfs_file_write_iter,
2914         .mmap           = btrfs_file_mmap,
2915         .open           = generic_file_open,
2916         .release        = btrfs_release_file,
2917         .fsync          = btrfs_sync_file,
2918         .fallocate      = btrfs_fallocate,
2919         .unlocked_ioctl = btrfs_ioctl,
2920 #ifdef CONFIG_COMPAT
2921         .compat_ioctl   = btrfs_ioctl,
2922 #endif
2923 };
2924
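     /* Tear down the slab cache backing the auto-defrag records */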
2925 void btrfs_auto_defrag_exit(void)
2926 {
2927         /* kmem_cache_destroy() is a no-op when passed a NULL pointer */
2928         kmem_cache_destroy(btrfs_inode_defrag_cachep);
2929 }
2930
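     /*
      * Create the slab cache for struct inode_defrag, the per-inode
      * records queued up while auto defrag is enabled.
      */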
2931 int btrfs_auto_defrag_init(void)
2932 {
2933         btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
2934                                         sizeof(struct inode_defrag), 0,
2935                                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
2936                                         NULL);
2937         if (!btrfs_inode_defrag_cachep)
2938                 return -ENOMEM;
2939
2940         return 0;
2941 }
2942
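     /*
      * Start writeback on [start, end] and make sure the resulting
      * ordered extents exist before callers go on to wait on them.  See
      * the comment below for why the write may have to be issued twice.
      */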
2943 int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
2944 {
2945         int ret;
2946
2947         /*
2948          * With compression we will find and lock a dirty page, clear it as
2949          * dirty, set up an async extent, and immediately return with the
2950          * entire range locked but with nobody actually marked as under
2951          * writeback.  So we can't just filemap_write_and_wait_range() and
2952          * expect it to work, since that will just kick off a thread to do
2953          * the actual work.  We need to call filemap_fdatawrite_range()
2954          * _again_, since it will wait on the page lock, which won't be
2955          * unlocked until after the pages have been marked as writeback, and
2956          * so we're good to go from there.  We have to do this, otherwise
2957          * we'll miss the ordered extents and that results in badness.
2958          * Please Josef, do not think you know better and pull this out at
2959          * some point in the future; it is right and you are wrong.
2960          */
2961         ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
2962         if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
2963                              &BTRFS_I(inode)->runtime_flags))
2964                 ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
2965
2966         return ret;
2967 }