/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

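/*
 * Return the first byte past the end of an ordered extent, saturating at
 * (u64)-1 so callers are safe if file_offset + len overflows.
 */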
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

/* returns NULL if the insertion worked, or the existing node whose range
 * already contains file_offset
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

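/*
 * A second ordered extent covering this offset is a logic error, so hand
 * the details to btrfs_panic() to escalate.
 */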
static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
		    "%llu", offset);
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

/*
 * find the first ordered struct that contains this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->trans_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&root->fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root,
			      &root->fs_info->ordered_roots);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	return 0;
}

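/* add an ordered extent with no DIO flag and no compression */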
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

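/* same, but mark the ordered extent as coming from O_DIRECT IO */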
int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

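/* same, but record the compression type used for the extent */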
int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiple ordered extents.
 */
void btrfs_add_ordered_sum(struct inode *inode,
			   struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			"bad ordering dec_start %llu end %llu", dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			"bad ordered accounting left %llu size %llu",
			entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		if (waitqueue_active(&entry->wait))
			wake_up(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		if (waitqueue_active(&entry->wait))
			wake_up(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/* Needs to either be called under a log transaction or the log_mutex */
void btrfs_get_logged_extents(struct inode *inode,
			      struct list_head *logged_list,
			      const loff_t start,
			      const loff_t end)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct rb_node *n;
	struct rb_node *prev;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	n = __tree_search(&tree->tree, end, &prev);
	if (!n)
		n = prev;
	for (; n; n = rb_prev(n)) {
		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		if (ordered->file_offset > end)
			continue;
		if (entry_end(ordered) <= start)
			break;
		if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;
		list_add(&ordered->log_list, logged_list);
		atomic_inc(&ordered->refs);
	}
	spin_unlock_irq(&tree->lock);
}

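/*
 * Drop the references that btrfs_get_logged_extents() took on each entry
 * of the list, emptying it in the process.
 */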
void btrfs_put_logged_extents(struct list_head *logged_list)
{
	struct btrfs_ordered_extent *ordered;

	while (!list_empty(logged_list)) {
		ordered = list_first_entry(logged_list,
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		btrfs_put_ordered_extent(ordered);
	}
}

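/*
 * Splice a list of collected ordered extents onto the logged list of the
 * current log transaction (log_transid % 2 selects which of the two lists).
 */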
void btrfs_submit_logged_extents(struct list_head *logged_list,
				 struct btrfs_root *log)
{
	int index = log->log_transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	list_splice_tail(logged_list, &log->logged_list[index]);
	spin_unlock_irq(&log->log_extents_lock[index]);
}

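/*
 * Wait for every logged ordered extent of the given log transaction to
 * finish its IO, starting writeback first on buffered extents that have
 * not completed yet.
 */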
void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
			       struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);

		if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
			struct inode *inode = ordered->inode;
			u64 start = ordered->file_offset;
			u64 end = ordered->file_offset + ordered->len - 1;

			WARN_ON(!inode);
			filemap_fdatawrite_range(inode->i_mapping, start, end);
		}
		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
						   &ordered->flags));

		/*
		 * If our ordered extent completed it means it updated the
		 * fs/subvol and csum trees already, so no need to make the
		 * current transaction's commit wait for it, as we end up
		 * holding memory unnecessarily and delaying the inode's iput
		 * until the transaction commit (we schedule an iput for the
		 * inode when the ordered extent's refcount drops to 0), which
		 * prevents it from being evictable until the transaction
		 * commits.
		 */
		if (test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags))
			btrfs_put_ordered_extent(ordered);
		else
			list_add_tail(&ordered->trans_list, &trans->ordered);

		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

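/*
 * Drop the references held by a log transaction's logged list without
 * waiting for the ordered extents to finish.
 */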
void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (atomic_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->log_list));
		ASSERT(list_empty(&entry->trans_list));
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * remove an ordered extent from the tree.  No references are dropped,
 * but any waiters are woken.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&root->fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

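/*
 * Work function queued by btrfs_wait_ordered_extents(): wait for one
 * ordered extent to finish, then signal its completion.
 */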
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for ordered extents in a root; nr limits how many are waited for
 * (-1 means all of them).  This is done when balancing space between
 * drives.
 */
int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr)
{
	struct list_head splice, works;
	struct btrfs_ordered_extent *ordered, *next;
	int count = 0;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);
		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		atomic_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_flush_delalloc_helper,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(root->fs_info->flush_workers,
				 &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != -1)
			nr--;
		count++;
	}
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

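/*
 * Walk all roots with pending ordered extents and wait on them.  nr bounds
 * the total number of extents waited for across roots; -1 means no limit.
 */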
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_root *root;
	struct list_head splice;
	int done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr);
		btrfs_put_fs_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != -1) {
			nr -= done;
			WARN_ON(nr < 0);
		}
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is set, this waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (ret || end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
							u64 file_offset,
							u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

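/*
 * Return true if any ordered extent overlaps the given range.  Only the
 * existence check is needed, so the lookup's reference is dropped at once.
 */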
bool btrfs_have_ordered_extents_in_range(struct inode *inode,
					 u64 file_offset,
					 u64 len)
{
	struct btrfs_ordered_extent *oe;

	oe = btrfs_lookup_ordered_range(inode, file_offset, len);
	if (oe) {
		btrfs_put_ordered_extent(oe);
		return true;
	}
	return false;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;

	spin_lock_irq(&tree->lock);
	if (ordered) {
		offset = entry_end(ordered);
		if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags))
			offset = min(offset,
				     ordered->file_offset +
				     ordered->truncated_len);
	} else {
		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
	}
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/* truncate file */
	if (disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = i_size;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size)
		goto out;

	/*
	 * We still need to update disk_i_size if outstanding_isize is greater
	 * than disk_i_size.
	 */
	if (offset <= disk_i_size &&
	    (!ordered || ordered->outstanding_isize <= disk_i_size))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;
		if (entry_end(test) > disk_i_size) {
			/*
			 * we don't update disk_i_size now, so record this
			 * outstanding i_size.  Otherwise we will not know
			 * the real i_size.
			 */
			if (test->outstanding_isize < offset)
				test->outstanding_isize = offset;
			if (ordered &&
			    ordered->outstanding_isize >
			    test->outstanding_isize)
				test->outstanding_isize =
						ordered->outstanding_isize;
			goto out;
		}
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one, and
	 * we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the i_disk_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum, int len)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >>
			    inode->i_sb->s_blocksize_bits;
			num_sectors = ordered_sum->len >>
				      inode->i_sb->s_blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i,
			       num_sectors);

			index += (int)num_sectors;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}

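/* set up the slab cache for ordered extents at module load */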
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

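/* tear the slab cache down again on module unload */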
void ordered_data_exit(void)
{
	if (btrfs_ordered_extent_cache)
		kmem_cache_destroy(btrfs_ordered_extent_cache);
}