Btrfs: deal with free space cache errors while replaying log
[cascardo/linux.git] / fs / btrfs / tree-log.c
1 /*
2  * Copyright (C) 2008 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/list_sort.h>
22 #include "ctree.h"
23 #include "transaction.h"
24 #include "disk-io.h"
25 #include "locking.h"
26 #include "print-tree.h"
27 #include "backref.h"
28 #include "compat.h"
29 #include "tree-log.h"
30 #include "hash.h"
31
32 /* magic values for the inode_only field in btrfs_log_inode:
33  *
34  * LOG_INODE_ALL means to log everything
35  * LOG_INODE_EXISTS means to log just enough to recreate the inode
36  * during log replay
37  */
38 #define LOG_INODE_ALL 0
39 #define LOG_INODE_EXISTS 1
40
41 /*
42  * directory trouble cases
43  *
44  * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
45  * log, we must force a full commit before doing an fsync of the directory
46  * where the unlink was done.
47  * ---> record transid of last unlink/rename per directory
48  *
49  * mkdir foo/some_dir
50  * normal commit
51  * rename foo/some_dir foo2/some_dir
52  * mkdir foo/some_dir
53  * fsync foo/some_dir/some_file
54  *
55  * The fsync above will unlink the original some_dir without recording
56  * it in its new location (foo2).  After a crash, some_dir will be gone
57  * unless the fsync of some_file forces a full commit
58  *
59  * 2) we must log any new names for any file or dir that is in the fsync
60  * log. ---> check inode while renaming/linking.
61  *
62  * 2a) we must log any new names for any file or dir during rename
63  * when the directory they are being removed from was logged.
64  * ---> check inode and old parent dir during rename
65  *
66  *  2a is actually the more important variant.  With the extra logging
67  *  a crash might unlink the old name without recreating the new one
68  *
69  * 3) after a crash, we must go through any directories with a link count
70  * of zero and redo the rm -rf
71  *
72  * mkdir f1/foo
73  * normal commit
74  * rm -rf f1/foo
75  * fsync(f1)
76  *
77  * The directory f1 was fully removed from the FS, but fsync was never
78  * called on f1, only its parent dir.  After a crash the rm -rf must
79  * be replayed.  This must be able to recurse down the entire
80  * directory tree.  The inode link count fixup code takes care of the
81  * ugly details.
82  */
83
84 /*
85  * stages for the tree walking.  The first
86  * stage (0) is to only pin down the blocks we find
87  * the second stage (1) is to make sure that all the inodes
88  * we find in the log are created in the subvolume.
89  *
90  * The last stage is to deal with directories and links and extents
91  * and all the other fun semantics
92  */
93 #define LOG_WALK_PIN_ONLY 0
94 #define LOG_WALK_REPLAY_INODES 1
95 #define LOG_WALK_REPLAY_ALL 2
96
97 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
98                              struct btrfs_root *root, struct inode *inode,
99                              int inode_only);
100 static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
101                              struct btrfs_root *root,
102                              struct btrfs_path *path, u64 objectid);
103 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
104                                        struct btrfs_root *root,
105                                        struct btrfs_root *log,
106                                        struct btrfs_path *path,
107                                        u64 dirid, int del_all);
108
109 /*
110  * tree logging is a special write ahead log used to make sure that
111  * fsyncs and O_SYNCs can happen without doing full tree commits.
112  *
113  * Full tree commits are expensive because they require commonly
114  * modified blocks to be recowed, creating many dirty pages in the
115  * extent tree and a 4x-6x higher write load than ext3.
116  *
117  * Instead of doing a tree commit on every fsync, we use the
118  * key ranges and transaction ids to find items for a given file or directory
119  * that have changed in this transaction.  Those items are copied into
120  * a special tree (one per subvolume root), that tree is written to disk
121  * and then the fsync is considered complete.
122  *
123  * After a crash, items are copied out of the log-tree back into the
124  * subvolume tree.  Any file data extents found are recorded in the extent
125  * allocation tree, and the log-tree freed.
126  *
127  * The log tree is read three times: once to pin down all the extents it is
128  * using in ram, once to create all the inodes logged in the tree,
129  * and once to do all the other items.
130  */
131
132 /*
133  * start a sub transaction and setup the log tree
134  * this increments the log tree writer count to make the people
135  * syncing the tree wait for us to finish
136  */
137 static int start_log_trans(struct btrfs_trans_handle *trans,
138                            struct btrfs_root *root)
139 {
140         int ret;
141         int err = 0;
142
143         mutex_lock(&root->log_mutex);
144         if (root->log_root) {
145                 if (!root->log_start_pid) {
146                         root->log_start_pid = current->pid;
147                         root->log_multiple_pids = false;
148                 } else if (root->log_start_pid != current->pid) {
149                         root->log_multiple_pids = true;
150                 }
151
152                 atomic_inc(&root->log_batch);
153                 atomic_inc(&root->log_writers);
154                 mutex_unlock(&root->log_mutex);
155                 return 0;
156         }
157         root->log_multiple_pids = false;
158         root->log_start_pid = current->pid;
159         mutex_lock(&root->fs_info->tree_log_mutex);
160         if (!root->fs_info->log_root_tree) {
161                 ret = btrfs_init_log_root_tree(trans, root->fs_info);
162                 if (ret)
163                         err = ret;
164         }
165         if (err == 0 && !root->log_root) {
166                 ret = btrfs_add_log_tree(trans, root);
167                 if (ret)
168                         err = ret;
169         }
170         mutex_unlock(&root->fs_info->tree_log_mutex);
171         atomic_inc(&root->log_batch);
172         atomic_inc(&root->log_writers);
173         mutex_unlock(&root->log_mutex);
174         return err;
175 }
176
177 /*
178  * returns 0 if there was a log transaction running and we were able
179  * to join, or returns -ENOENT if there were not transactions
180  * in progress
181  */
182 static int join_running_log_trans(struct btrfs_root *root)
183 {
184         int ret = -ENOENT;
185
186         smp_mb();
187         if (!root->log_root)
188                 return -ENOENT;
189
190         mutex_lock(&root->log_mutex);
191         if (root->log_root) {
192                 ret = 0;
193                 atomic_inc(&root->log_writers);
194         }
195         mutex_unlock(&root->log_mutex);
196         return ret;
197 }
198
199 /*
200  * This either makes the current running log transaction wait
201  * until you call btrfs_end_log_trans() or it makes any future
202  * log transactions wait until you call btrfs_end_log_trans()
203  */
204 int btrfs_pin_log_trans(struct btrfs_root *root)
205 {
206         int ret = -ENOENT;
207
208         mutex_lock(&root->log_mutex);
209         atomic_inc(&root->log_writers);
210         mutex_unlock(&root->log_mutex);
211         return ret;
212 }
213
214 /*
215  * indicate we're done making changes to the log tree
216  * and wake up anyone waiting to do a sync
217  */
218 void btrfs_end_log_trans(struct btrfs_root *root)
219 {
220         if (atomic_dec_and_test(&root->log_writers)) {
221                 smp_mb();
222                 if (waitqueue_active(&root->log_writer_wait))
223                         wake_up(&root->log_writer_wait);
224         }
225 }
226
227
228 /*
229  * the walk control struct is used to pass state down the chain when
230  * processing the log tree.  The stage field tells us which part
231  * of the log tree processing we are currently doing.  The others
232  * are state fields used for that specific part
233  */
234 struct walk_control {
235         /* should we free the extent on disk when done?  This is used
236          * at transaction commit time while freeing a log tree
237          */
238         int free;
239
240         /* should we write out the extent buffer?  This is used
241          * while flushing the log tree to disk during a sync
242          */
243         int write;
244
245         /* should we wait for the extent buffer io to finish?  Also used
246          * while flushing the log tree to disk for a sync
247          */
248         int wait;
249
250         /* pin only walk, we record which extents on disk belong to the
251          * log trees
252          */
253         int pin;
254
255         /* what stage of the replay code we're currently in */
256         int stage;
257
258         /* the root we are currently replaying */
259         struct btrfs_root *replay_dest;
260
261         /* the trans handle for the current replay */
262         struct btrfs_trans_handle *trans;
263
264         /* the function that gets used to process blocks we find in the
265          * tree.  Note the extent_buffer might not be up to date when it is
266          * passed in, and it must be checked or read if you need the data
267          * inside it
268          */
269         int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
270                             struct walk_control *wc, u64 gen);
271 };
272
273 /*
274  * process_func used to pin down extents, write them or wait on them
275  */
276 static int process_one_buffer(struct btrfs_root *log,
277                               struct extent_buffer *eb,
278                               struct walk_control *wc, u64 gen)
279 {
280         int ret = 0;
281
282         if (wc->pin)
283                 ret = btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
284                                                       eb->start, eb->len);
285
286         if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
287                 if (wc->write)
288                         btrfs_write_tree_block(eb);
289                 if (wc->wait)
290                         btrfs_wait_tree_block_writeback(eb);
291         }
292         return ret;
293 }
294
295 /*
296  * Item overwrite used by replay and tree logging.  eb, slot and key all refer
297  * to the src data we are copying out.
298  *
299  * root is the tree we are copying into, and path is a scratch
300  * path for use in this function (it should be released on entry and
301  * will be released on exit).
302  *
303  * If the key is already in the destination tree the existing item is
304  * overwritten.  If the existing item isn't big enough, it is extended.
305  * If it is too large, it is truncated.
306  *
307  * If the key isn't in the destination yet, a new item is inserted.
308  */
309 static noinline int overwrite_item(struct btrfs_trans_handle *trans,
310                                    struct btrfs_root *root,
311                                    struct btrfs_path *path,
312                                    struct extent_buffer *eb, int slot,
313                                    struct btrfs_key *key)
314 {
315         int ret;
316         u32 item_size;
317         u64 saved_i_size = 0;
318         int save_old_i_size = 0;
319         unsigned long src_ptr;
320         unsigned long dst_ptr;
321         int overwrite_root = 0;
322         bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
323
324         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
325                 overwrite_root = 1;
326
327         item_size = btrfs_item_size_nr(eb, slot);
328         src_ptr = btrfs_item_ptr_offset(eb, slot);
329
330         /* look for the key in the destination tree */
331         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
332         if (ret < 0)
333                 return ret;
334
335         if (ret == 0) {
336                 char *src_copy;
337                 char *dst_copy;
338                 u32 dst_size = btrfs_item_size_nr(path->nodes[0],
339                                                   path->slots[0]);
340                 if (dst_size != item_size)
341                         goto insert;
342
343                 if (item_size == 0) {
344                         btrfs_release_path(path);
345                         return 0;
346                 }
347                 dst_copy = kmalloc(item_size, GFP_NOFS);
348                 src_copy = kmalloc(item_size, GFP_NOFS);
349                 if (!dst_copy || !src_copy) {
350                         btrfs_release_path(path);
351                         kfree(dst_copy);
352                         kfree(src_copy);
353                         return -ENOMEM;
354                 }
355
356                 read_extent_buffer(eb, src_copy, src_ptr, item_size);
357
358                 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
359                 read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
360                                    item_size);
361                 ret = memcmp(dst_copy, src_copy, item_size);
362
363                 kfree(dst_copy);
364                 kfree(src_copy);
365                 /*
366                  * they have the same contents, just return, this saves
367                  * us from cowing blocks in the destination tree and doing
368                  * extra writes that may not have been done by a previous
369                  * sync
370                  */
371                 if (ret == 0) {
372                         btrfs_release_path(path);
373                         return 0;
374                 }
375
376                 /*
377                  * We need to load the old nbytes into the inode so when we
378                  * replay the extents we've logged we get the right nbytes.
379                  */
380                 if (inode_item) {
381                         struct btrfs_inode_item *item;
382                         u64 nbytes;
383
384                         item = btrfs_item_ptr(path->nodes[0], path->slots[0],
385                                               struct btrfs_inode_item);
386                         nbytes = btrfs_inode_nbytes(path->nodes[0], item);
387                         item = btrfs_item_ptr(eb, slot,
388                                               struct btrfs_inode_item);
389                         btrfs_set_inode_nbytes(eb, item, nbytes);
390                 }
391         } else if (inode_item) {
392                 struct btrfs_inode_item *item;
393
394                 /*
395                  * New inode, set nbytes to 0 so that the nbytes comes out
396                  * properly when we replay the extents.
397                  */
398                 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
399                 btrfs_set_inode_nbytes(eb, item, 0);
400         }
401 insert:
402         btrfs_release_path(path);
403         /* try to insert the key into the destination tree */
404         ret = btrfs_insert_empty_item(trans, root, path,
405                                       key, item_size);
406
407         /* make sure any existing item is the correct size */
408         if (ret == -EEXIST) {
409                 u32 found_size;
410                 found_size = btrfs_item_size_nr(path->nodes[0],
411                                                 path->slots[0]);
412                 if (found_size > item_size)
413                         btrfs_truncate_item(root, path, item_size, 1);
414                 else if (found_size < item_size)
415                         btrfs_extend_item(root, path,
416                                           item_size - found_size);
417         } else if (ret) {
418                 return ret;
419         }
420         dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
421                                         path->slots[0]);
422
423         /* don't overwrite an existing inode if the generation number
424          * was logged as zero.  This is done when the tree logging code
425          * is just logging an inode to make sure it exists after recovery.
426          *
427          * Also, don't overwrite i_size on directories during replay.
428          * log replay inserts and removes directory items based on the
429          * state of the tree found in the subvolume, and i_size is modified
430          * as it goes
431          */
432         if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
433                 struct btrfs_inode_item *src_item;
434                 struct btrfs_inode_item *dst_item;
435
436                 src_item = (struct btrfs_inode_item *)src_ptr;
437                 dst_item = (struct btrfs_inode_item *)dst_ptr;
438
439                 if (btrfs_inode_generation(eb, src_item) == 0)
440                         goto no_copy;
441
442                 if (overwrite_root &&
443                     S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
444                     S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
445                         save_old_i_size = 1;
446                         saved_i_size = btrfs_inode_size(path->nodes[0],
447                                                         dst_item);
448                 }
449         }
450
451         copy_extent_buffer(path->nodes[0], eb, dst_ptr,
452                            src_ptr, item_size);
453
454         if (save_old_i_size) {
455                 struct btrfs_inode_item *dst_item;
456                 dst_item = (struct btrfs_inode_item *)dst_ptr;
457                 btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
458         }
459
460         /* make sure the generation is filled in */
461         if (key->type == BTRFS_INODE_ITEM_KEY) {
462                 struct btrfs_inode_item *dst_item;
463                 dst_item = (struct btrfs_inode_item *)dst_ptr;
464                 if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
465                         btrfs_set_inode_generation(path->nodes[0], dst_item,
466                                                    trans->transid);
467                 }
468         }
469 no_copy:
470         btrfs_mark_buffer_dirty(path->nodes[0]);
471         btrfs_release_path(path);
472         return 0;
473 }
474
475 /*
476  * simple helper to read an inode off the disk from a given root
477  * This can only be called for subvolume roots and not for the log
478  */
479 static noinline struct inode *read_one_inode(struct btrfs_root *root,
480                                              u64 objectid)
481 {
482         struct btrfs_key key;
483         struct inode *inode;
484
485         key.objectid = objectid;
486         key.type = BTRFS_INODE_ITEM_KEY;
487         key.offset = 0;
488         inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
489         if (IS_ERR(inode)) {
490                 inode = NULL;
491         } else if (is_bad_inode(inode)) {
492                 iput(inode);
493                 inode = NULL;
494         }
495         return inode;
496 }
497
498 /* replays a single extent in 'eb' at 'slot' with 'key' into the
499  * subvolume 'root'.  path is released on entry and should be released
500  * on exit.
501  *
502  * extents in the log tree have not been allocated out of the extent
503  * tree yet.  So, this completes the allocation, taking a reference
504  * as required if the extent already exists or creating a new extent
505  * if it isn't in the extent allocation tree yet.
506  *
507  * The extent is inserted into the file, dropping any existing extents
508  * from the file that overlap the new one.
509  */
510 static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
511                                       struct btrfs_root *root,
512                                       struct btrfs_path *path,
513                                       struct extent_buffer *eb, int slot,
514                                       struct btrfs_key *key)
515 {
516         int found_type;
517         u64 extent_end;
518         u64 start = key->offset;
519         u64 nbytes = 0;
520         struct btrfs_file_extent_item *item;
521         struct inode *inode = NULL;
522         unsigned long size;
523         int ret = 0;
524
525         item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
526         found_type = btrfs_file_extent_type(eb, item);
527
528         if (found_type == BTRFS_FILE_EXTENT_REG ||
529             found_type == BTRFS_FILE_EXTENT_PREALLOC) {
530                 nbytes = btrfs_file_extent_num_bytes(eb, item);
531                 extent_end = start + nbytes;
532
533                 /*
534                  * We don't add to the inodes nbytes if we are prealloc or a
535                  * hole.
536                  */
537                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
538                         nbytes = 0;
539         } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
540                 size = btrfs_file_extent_inline_len(eb, item);
541                 nbytes = btrfs_file_extent_ram_bytes(eb, item);
542                 extent_end = ALIGN(start + size, root->sectorsize);
543         } else {
544                 ret = 0;
545                 goto out;
546         }
547
548         inode = read_one_inode(root, key->objectid);
549         if (!inode) {
550                 ret = -EIO;
551                 goto out;
552         }
553
554         /*
555          * first check to see if we already have this extent in the
556          * file.  This must be done before the btrfs_drop_extents run
557          * so we don't try to drop this extent.
558          */
559         ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
560                                        start, 0);
561
562         if (ret == 0 &&
563             (found_type == BTRFS_FILE_EXTENT_REG ||
564              found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
565                 struct btrfs_file_extent_item cmp1;
566                 struct btrfs_file_extent_item cmp2;
567                 struct btrfs_file_extent_item *existing;
568                 struct extent_buffer *leaf;
569
570                 leaf = path->nodes[0];
571                 existing = btrfs_item_ptr(leaf, path->slots[0],
572                                           struct btrfs_file_extent_item);
573
574                 read_extent_buffer(eb, &cmp1, (unsigned long)item,
575                                    sizeof(cmp1));
576                 read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
577                                    sizeof(cmp2));
578
579                 /*
580                  * we already have a pointer to this exact extent,
581                  * we don't have to do anything
582                  */
583                 if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
584                         btrfs_release_path(path);
585                         goto out;
586                 }
587         }
588         btrfs_release_path(path);
589
590         /* drop any overlapping extents */
591         ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
592         BUG_ON(ret);
593
594         if (found_type == BTRFS_FILE_EXTENT_REG ||
595             found_type == BTRFS_FILE_EXTENT_PREALLOC) {
596                 u64 offset;
597                 unsigned long dest_offset;
598                 struct btrfs_key ins;
599
600                 ret = btrfs_insert_empty_item(trans, root, path, key,
601                                               sizeof(*item));
602                 BUG_ON(ret);
603                 dest_offset = btrfs_item_ptr_offset(path->nodes[0],
604                                                     path->slots[0]);
605                 copy_extent_buffer(path->nodes[0], eb, dest_offset,
606                                 (unsigned long)item,  sizeof(*item));
607
608                 ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
609                 ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
610                 ins.type = BTRFS_EXTENT_ITEM_KEY;
611                 offset = key->offset - btrfs_file_extent_offset(eb, item);
612
613                 if (ins.objectid > 0) {
614                         u64 csum_start;
615                         u64 csum_end;
616                         LIST_HEAD(ordered_sums);
617                         /*
618                          * is this extent already allocated in the extent
619                          * allocation tree?  If so, just add a reference
620                          */
621                         ret = btrfs_lookup_extent(root, ins.objectid,
622                                                 ins.offset);
623                         if (ret == 0) {
624                                 ret = btrfs_inc_extent_ref(trans, root,
625                                                 ins.objectid, ins.offset,
626                                                 0, root->root_key.objectid,
627                                                 key->objectid, offset, 0);
628                                 if (ret)
629                                         goto out;
630                         } else {
631                                 /*
632                                  * insert the extent pointer in the extent
633                                  * allocation tree
634                                  */
635                                 ret = btrfs_alloc_logged_file_extent(trans,
636                                                 root, root->root_key.objectid,
637                                                 key->objectid, offset, &ins);
638                                 if (ret)
639                                         goto out;
640                         }
641                         btrfs_release_path(path);
642
643                         if (btrfs_file_extent_compression(eb, item)) {
644                                 csum_start = ins.objectid;
645                                 csum_end = csum_start + ins.offset;
646                         } else {
647                                 csum_start = ins.objectid +
648                                         btrfs_file_extent_offset(eb, item);
649                                 csum_end = csum_start +
650                                         btrfs_file_extent_num_bytes(eb, item);
651                         }
652
653                         ret = btrfs_lookup_csums_range(root->log_root,
654                                                 csum_start, csum_end - 1,
655                                                 &ordered_sums, 0);
656                         BUG_ON(ret);
657                         while (!list_empty(&ordered_sums)) {
658                                 struct btrfs_ordered_sum *sums;
659                                 sums = list_entry(ordered_sums.next,
660                                                 struct btrfs_ordered_sum,
661                                                 list);
662                                 ret = btrfs_csum_file_blocks(trans,
663                                                 root->fs_info->csum_root,
664                                                 sums);
665                                 BUG_ON(ret);
666                                 list_del(&sums->list);
667                                 kfree(sums);
668                         }
669                 } else {
670                         btrfs_release_path(path);
671                 }
672         } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
673                 /* inline extents are easy, we just overwrite them */
674                 ret = overwrite_item(trans, root, path, eb, slot, key);
675                 BUG_ON(ret);
676         }
677
678         inode_add_bytes(inode, nbytes);
679         ret = btrfs_update_inode(trans, root, inode);
680 out:
681         if (inode)
682                 iput(inode);
683         return ret;
684 }
685
686 /*
687  * when cleaning up conflicts between the directory names in the
688  * subvolume, directory names in the log and directory names in the
689  * inode back references, we may have to unlink inodes from directories.
690  *
691  * This is a helper function to do the unlink of a specific directory
692  * item
693  */
694 static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
695                                       struct btrfs_root *root,
696                                       struct btrfs_path *path,
697                                       struct inode *dir,
698                                       struct btrfs_dir_item *di)
699 {
700         struct inode *inode;
701         char *name;
702         int name_len;
703         struct extent_buffer *leaf;
704         struct btrfs_key location;
705         int ret;
706
707         leaf = path->nodes[0];
708
709         btrfs_dir_item_key_to_cpu(leaf, di, &location);
710         name_len = btrfs_dir_name_len(leaf, di);
711         name = kmalloc(name_len, GFP_NOFS);
712         if (!name)
713                 return -ENOMEM;
714
715         read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
716         btrfs_release_path(path);
717
718         inode = read_one_inode(root, location.objectid);
719         if (!inode) {
720                 kfree(name);
721                 return -EIO;
722         }
723
724         ret = link_to_fixup_dir(trans, root, path, location.objectid);
725         BUG_ON(ret);
726
727         ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
728         BUG_ON(ret);
729         kfree(name);
730
731         iput(inode);
732
733         btrfs_run_delayed_items(trans, root);
734         return ret;
735 }
736
737 /*
738  * helper function to see if a given name and sequence number found
739  * in an inode back reference are already in a directory and correctly
740  * point to this inode
741  */
742 static noinline int inode_in_dir(struct btrfs_root *root,
743                                  struct btrfs_path *path,
744                                  u64 dirid, u64 objectid, u64 index,
745                                  const char *name, int name_len)
746 {
747         struct btrfs_dir_item *di;
748         struct btrfs_key location;
749         int match = 0;
750
751         di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
752                                          index, name, name_len, 0);
753         if (di && !IS_ERR(di)) {
754                 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
755                 if (location.objectid != objectid)
756                         goto out;
757         } else
758                 goto out;
759         btrfs_release_path(path);
760
761         di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
762         if (di && !IS_ERR(di)) {
763                 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
764                 if (location.objectid != objectid)
765                         goto out;
766         } else
767                 goto out;
768         match = 1;
769 out:
770         btrfs_release_path(path);
771         return match;
772 }
773
774 /*
775  * helper function to check a log tree for a named back reference in
776  * an inode.  This is used to decide if a back reference that is
777  * found in the subvolume conflicts with what we find in the log.
778  *
779  * inode backreferences may have multiple refs in a single item,
780  * during replay we process one reference at a time, and we don't
781  * want to delete valid links to a file from the subvolume if that
782  * link is also in the log.
783  */
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
				   u64 ref_objectid,
				   char *name, int namelen)
{
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	unsigned long ptr;
	unsigned long ptr_end;
	unsigned long name_ptr;
	int found_name_len;
	int item_size;
	int ret;
	int match = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	/*
	 * NOTE(review): callers test this function with "!backref_in_log()",
	 * so the -ENOMEM above reads as "found in the log" (skip the unlink).
	 * Confirm that this fail-safe direction is intended.
	 */

	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
	if (ret != 0)
		goto out;	/* key absent from the log: no match */

	ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);

	/* extended refs are located by (parent, name) hash, not by a scan */
	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		if (btrfs_find_name_in_ext_backref(path, ref_objectid,
						   name, namelen, NULL))
			match = 1;

		goto out;
	}

	/* old-style ref items pack several (len, name) entries back to back */
	item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		ref = (struct btrfs_inode_ref *)ptr;
		found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
		if (found_name_len == namelen) {
			/* name bytes immediately follow the ref struct */
			name_ptr = (unsigned long)(ref + 1);
			ret = memcmp_extent_buffer(path->nodes[0], name,
						   name_ptr, namelen);
			if (ret == 0) {
				match = 1;
				goto out;
			}
		}
		ptr = (unsigned long)(ref + 1) + found_name_len;
	}
out:
	btrfs_free_path(path);
	return match;
}
837
838 static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
839                                   struct btrfs_root *root,
840                                   struct btrfs_path *path,
841                                   struct btrfs_root *log_root,
842                                   struct inode *dir, struct inode *inode,
843                                   struct extent_buffer *eb,
844                                   u64 inode_objectid, u64 parent_objectid,
845                                   u64 ref_index, char *name, int namelen,
846                                   int *search_done)
847 {
848         int ret;
849         char *victim_name;
850         int victim_name_len;
851         struct extent_buffer *leaf;
852         struct btrfs_dir_item *di;
853         struct btrfs_key search_key;
854         struct btrfs_inode_extref *extref;
855
856 again:
857         /* Search old style refs */
858         search_key.objectid = inode_objectid;
859         search_key.type = BTRFS_INODE_REF_KEY;
860         search_key.offset = parent_objectid;
861         ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
862         if (ret == 0) {
863                 struct btrfs_inode_ref *victim_ref;
864                 unsigned long ptr;
865                 unsigned long ptr_end;
866
867                 leaf = path->nodes[0];
868
869                 /* are we trying to overwrite a back ref for the root directory
870                  * if so, just jump out, we're done
871                  */
872                 if (search_key.objectid == search_key.offset)
873                         return 1;
874
875                 /* check all the names in this back reference to see
876                  * if they are in the log.  if so, we allow them to stay
877                  * otherwise they must be unlinked as a conflict
878                  */
879                 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
880                 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
881                 while (ptr < ptr_end) {
882                         victim_ref = (struct btrfs_inode_ref *)ptr;
883                         victim_name_len = btrfs_inode_ref_name_len(leaf,
884                                                                    victim_ref);
885                         victim_name = kmalloc(victim_name_len, GFP_NOFS);
886                         BUG_ON(!victim_name);
887
888                         read_extent_buffer(leaf, victim_name,
889                                            (unsigned long)(victim_ref + 1),
890                                            victim_name_len);
891
892                         if (!backref_in_log(log_root, &search_key,
893                                             parent_objectid,
894                                             victim_name,
895                                             victim_name_len)) {
896                                 btrfs_inc_nlink(inode);
897                                 btrfs_release_path(path);
898
899                                 ret = btrfs_unlink_inode(trans, root, dir,
900                                                          inode, victim_name,
901                                                          victim_name_len);
902                                 BUG_ON(ret);
903                                 btrfs_run_delayed_items(trans, root);
904                                 kfree(victim_name);
905                                 *search_done = 1;
906                                 goto again;
907                         }
908                         kfree(victim_name);
909
910                         ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
911                 }
912                 BUG_ON(ret);
913
914                 /*
915                  * NOTE: we have searched root tree and checked the
916                  * coresponding ref, it does not need to check again.
917                  */
918                 *search_done = 1;
919         }
920         btrfs_release_path(path);
921
922         /* Same search but for extended refs */
923         extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
924                                            inode_objectid, parent_objectid, 0,
925                                            0);
926         if (!IS_ERR_OR_NULL(extref)) {
927                 u32 item_size;
928                 u32 cur_offset = 0;
929                 unsigned long base;
930                 struct inode *victim_parent;
931
932                 leaf = path->nodes[0];
933
934                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
935                 base = btrfs_item_ptr_offset(leaf, path->slots[0]);
936
937                 while (cur_offset < item_size) {
938                         extref = (struct btrfs_inode_extref *)base + cur_offset;
939
940                         victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
941
942                         if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
943                                 goto next;
944
945                         victim_name = kmalloc(victim_name_len, GFP_NOFS);
946                         read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
947                                            victim_name_len);
948
949                         search_key.objectid = inode_objectid;
950                         search_key.type = BTRFS_INODE_EXTREF_KEY;
951                         search_key.offset = btrfs_extref_hash(parent_objectid,
952                                                               victim_name,
953                                                               victim_name_len);
954                         ret = 0;
955                         if (!backref_in_log(log_root, &search_key,
956                                             parent_objectid, victim_name,
957                                             victim_name_len)) {
958                                 ret = -ENOENT;
959                                 victim_parent = read_one_inode(root,
960                                                                parent_objectid);
961                                 if (victim_parent) {
962                                         btrfs_inc_nlink(inode);
963                                         btrfs_release_path(path);
964
965                                         ret = btrfs_unlink_inode(trans, root,
966                                                                  victim_parent,
967                                                                  inode,
968                                                                  victim_name,
969                                                                  victim_name_len);
970                                         btrfs_run_delayed_items(trans, root);
971                                 }
972                                 BUG_ON(ret);
973                                 iput(victim_parent);
974                                 kfree(victim_name);
975                                 *search_done = 1;
976                                 goto again;
977                         }
978                         kfree(victim_name);
979                         BUG_ON(ret);
980 next:
981                         cur_offset += victim_name_len + sizeof(*extref);
982                 }
983                 *search_done = 1;
984         }
985         btrfs_release_path(path);
986
987         /* look for a conflicting sequence number */
988         di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
989                                          ref_index, name, namelen, 0);
990         if (di && !IS_ERR(di)) {
991                 ret = drop_one_dir_item(trans, root, path, dir, di);
992                 BUG_ON(ret);
993         }
994         btrfs_release_path(path);
995
996         /* look for a conflicing name */
997         di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
998                                    name, namelen, 0);
999         if (di && !IS_ERR(di)) {
1000                 ret = drop_one_dir_item(trans, root, path, dir, di);
1001                 BUG_ON(ret);
1002         }
1003         btrfs_release_path(path);
1004
1005         return 0;
1006 }
1007
1008 static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1009                              u32 *namelen, char **name, u64 *index,
1010                              u64 *parent_objectid)
1011 {
1012         struct btrfs_inode_extref *extref;
1013
1014         extref = (struct btrfs_inode_extref *)ref_ptr;
1015
1016         *namelen = btrfs_inode_extref_name_len(eb, extref);
1017         *name = kmalloc(*namelen, GFP_NOFS);
1018         if (*name == NULL)
1019                 return -ENOMEM;
1020
1021         read_extent_buffer(eb, *name, (unsigned long)&extref->name,
1022                            *namelen);
1023
1024         *index = btrfs_inode_extref_index(eb, extref);
1025         if (parent_objectid)
1026                 *parent_objectid = btrfs_inode_extref_parent(eb, extref);
1027
1028         return 0;
1029 }
1030
1031 static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1032                           u32 *namelen, char **name, u64 *index)
1033 {
1034         struct btrfs_inode_ref *ref;
1035
1036         ref = (struct btrfs_inode_ref *)ref_ptr;
1037
1038         *namelen = btrfs_inode_ref_name_len(eb, ref);
1039         *name = kmalloc(*namelen, GFP_NOFS);
1040         if (*name == NULL)
1041                 return -ENOMEM;
1042
1043         read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);
1044
1045         *index = btrfs_inode_ref_index(eb, ref);
1046
1047         return 0;
1048 }
1049
1050 /*
1051  * replay one inode back reference item found in the log tree.
1052  * eb, slot and key refer to the buffer and key found in the log tree.
1053  * root is the destination we are replaying into, and path is for temp
1054  * use by this function.  (it should be released on return).
1055  */
1056 static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
1057                                   struct btrfs_root *root,
1058                                   struct btrfs_root *log,
1059                                   struct btrfs_path *path,
1060                                   struct extent_buffer *eb, int slot,
1061                                   struct btrfs_key *key)
1062 {
1063         struct inode *dir;
1064         struct inode *inode;
1065         unsigned long ref_ptr;
1066         unsigned long ref_end;
1067         char *name;
1068         int namelen;
1069         int ret;
1070         int search_done = 0;
1071         int log_ref_ver = 0;
1072         u64 parent_objectid;
1073         u64 inode_objectid;
1074         u64 ref_index = 0;
1075         int ref_struct_size;
1076
1077         ref_ptr = btrfs_item_ptr_offset(eb, slot);
1078         ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
1079
1080         if (key->type == BTRFS_INODE_EXTREF_KEY) {
1081                 struct btrfs_inode_extref *r;
1082
1083                 ref_struct_size = sizeof(struct btrfs_inode_extref);
1084                 log_ref_ver = 1;
1085                 r = (struct btrfs_inode_extref *)ref_ptr;
1086                 parent_objectid = btrfs_inode_extref_parent(eb, r);
1087         } else {
1088                 ref_struct_size = sizeof(struct btrfs_inode_ref);
1089                 parent_objectid = key->offset;
1090         }
1091         inode_objectid = key->objectid;
1092
1093         /*
1094          * it is possible that we didn't log all the parent directories
1095          * for a given inode.  If we don't find the dir, just don't
1096          * copy the back ref in.  The link count fixup code will take
1097          * care of the rest
1098          */
1099         dir = read_one_inode(root, parent_objectid);
1100         if (!dir)
1101                 return -ENOENT;
1102
1103         inode = read_one_inode(root, inode_objectid);
1104         if (!inode) {
1105                 iput(dir);
1106                 return -EIO;
1107         }
1108
1109         while (ref_ptr < ref_end) {
1110                 if (log_ref_ver) {
1111                         ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
1112                                                 &ref_index, &parent_objectid);
1113                         /*
1114                          * parent object can change from one array
1115                          * item to another.
1116                          */
1117                         if (!dir)
1118                                 dir = read_one_inode(root, parent_objectid);
1119                         if (!dir)
1120                                 return -ENOENT;
1121                 } else {
1122                         ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
1123                                              &ref_index);
1124                 }
1125                 if (ret)
1126                         return ret;
1127
1128                 /* if we already have a perfect match, we're done */
1129                 if (!inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
1130                                   ref_index, name, namelen)) {
1131                         /*
1132                          * look for a conflicting back reference in the
1133                          * metadata. if we find one we have to unlink that name
1134                          * of the file before we add our new link.  Later on, we
1135                          * overwrite any existing back reference, and we don't
1136                          * want to create dangling pointers in the directory.
1137                          */
1138
1139                         if (!search_done) {
1140                                 ret = __add_inode_ref(trans, root, path, log,
1141                                                       dir, inode, eb,
1142                                                       inode_objectid,
1143                                                       parent_objectid,
1144                                                       ref_index, name, namelen,
1145                                                       &search_done);
1146                                 if (ret == 1)
1147                                         goto out;
1148                                 BUG_ON(ret);
1149                         }
1150
1151                         /* insert our name */
1152                         ret = btrfs_add_link(trans, dir, inode, name, namelen,
1153                                              0, ref_index);
1154                         BUG_ON(ret);
1155
1156                         btrfs_update_inode(trans, root, inode);
1157                 }
1158
1159                 ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
1160                 kfree(name);
1161                 if (log_ref_ver) {
1162                         iput(dir);
1163                         dir = NULL;
1164                 }
1165         }
1166
1167         /* finally write the back reference in the inode */
1168         ret = overwrite_item(trans, root, path, eb, slot, key);
1169         BUG_ON(ret);
1170
1171 out:
1172         btrfs_release_path(path);
1173         iput(dir);
1174         iput(inode);
1175         return 0;
1176 }
1177
1178 static int insert_orphan_item(struct btrfs_trans_handle *trans,
1179                               struct btrfs_root *root, u64 offset)
1180 {
1181         int ret;
1182         ret = btrfs_find_orphan_item(root, offset);
1183         if (ret > 0)
1184                 ret = btrfs_insert_orphan_item(trans, root, offset);
1185         return ret;
1186 }
1187
1188 static int count_inode_extrefs(struct btrfs_root *root,
1189                                struct inode *inode, struct btrfs_path *path)
1190 {
1191         int ret = 0;
1192         int name_len;
1193         unsigned int nlink = 0;
1194         u32 item_size;
1195         u32 cur_offset = 0;
1196         u64 inode_objectid = btrfs_ino(inode);
1197         u64 offset = 0;
1198         unsigned long ptr;
1199         struct btrfs_inode_extref *extref;
1200         struct extent_buffer *leaf;
1201
1202         while (1) {
1203                 ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
1204                                             &extref, &offset);
1205                 if (ret)
1206                         break;
1207
1208                 leaf = path->nodes[0];
1209                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1210                 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1211
1212                 while (cur_offset < item_size) {
1213                         extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
1214                         name_len = btrfs_inode_extref_name_len(leaf, extref);
1215
1216                         nlink++;
1217
1218                         cur_offset += name_len + sizeof(*extref);
1219                 }
1220
1221                 offset++;
1222                 btrfs_release_path(path);
1223         }
1224         btrfs_release_path(path);
1225
1226         if (ret < 0)
1227                 return ret;
1228         return nlink;
1229 }
1230
1231 static int count_inode_refs(struct btrfs_root *root,
1232                                struct inode *inode, struct btrfs_path *path)
1233 {
1234         int ret;
1235         struct btrfs_key key;
1236         unsigned int nlink = 0;
1237         unsigned long ptr;
1238         unsigned long ptr_end;
1239         int name_len;
1240         u64 ino = btrfs_ino(inode);
1241
1242         key.objectid = ino;
1243         key.type = BTRFS_INODE_REF_KEY;
1244         key.offset = (u64)-1;
1245
1246         while (1) {
1247                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1248                 if (ret < 0)
1249                         break;
1250                 if (ret > 0) {
1251                         if (path->slots[0] == 0)
1252                                 break;
1253                         path->slots[0]--;
1254                 }
1255                 btrfs_item_key_to_cpu(path->nodes[0], &key,
1256                                       path->slots[0]);
1257                 if (key.objectid != ino ||
1258                     key.type != BTRFS_INODE_REF_KEY)
1259                         break;
1260                 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
1261                 ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
1262                                                    path->slots[0]);
1263                 while (ptr < ptr_end) {
1264                         struct btrfs_inode_ref *ref;
1265
1266                         ref = (struct btrfs_inode_ref *)ptr;
1267                         name_len = btrfs_inode_ref_name_len(path->nodes[0],
1268                                                             ref);
1269                         ptr = (unsigned long)(ref + 1) + name_len;
1270                         nlink++;
1271                 }
1272
1273                 if (key.offset == 0)
1274                         break;
1275                 key.offset--;
1276                 btrfs_release_path(path);
1277         }
1278         btrfs_release_path(path);
1279
1280         return nlink;
1281 }
1282
1283 /*
1284  * There are a few corners where the link count of the file can't
1285  * be properly maintained during replay.  So, instead of adding
1286  * lots of complexity to the log code, we just scan the backrefs
1287  * for any file that has been through replay.
1288  *
1289  * The scan will update the link count on the inode to reflect the
1290  * number of back refs found.  If it goes down to zero, the iput
1291  * will free the inode.
1292  */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	int ret;
	u64 nlink = 0;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* links recorded as old-style INODE_REF items */
	ret = count_inode_refs(root, inode, path);
	if (ret < 0)
		goto out;

	nlink = ret;

	/* plus links recorded as extended back references */
	ret = count_inode_extrefs(root, inode, path);
	if (ret == -ENOENT)
		ret = 0;	/* no extref items at all is not an error */

	if (ret < 0)
		goto out;

	nlink += ret;

	ret = 0;

	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		btrfs_update_inode(trans, root, inode);
	}
	/*
	 * NOTE(review): (u64)-1 appears to mean "recompute the cached dir
	 * index counter on next use" -- confirm against btrfs_set_inode_index
	 */
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			/* an unlinked dir may still hold entries to clear */
			ret = replay_dir_deletes(trans, root, NULL, path,
						 ino, 1);
			BUG_ON(ret);
		}
		/* keep the zero-nlink inode tracked via an orphan item */
		ret = insert_orphan_item(trans, root, ino);
		BUG_ON(ret);
	}

out:
	btrfs_free_path(path);
	return ret;
}
1343
/*
 * Walk every BTRFS_ORPHAN_ITEM_KEY record under the fixup objectid,
 * delete each record, and recompute the link count of the inode it
 * names (key.offset holds the inode number).
 */
static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
	while (1) {
		/* ins_len=-1 / cow=1: prepare the path for deletion */
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			break;

		/* not-found lands one slot past; step back to the candidate */
		if (ret == 1) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		/* remove the fixup record before touching the inode */
		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;

		btrfs_release_path(path);
		inode = read_one_inode(root, key.offset);
		if (!inode)
			return -EIO;

		ret = fixup_inode_link_count(trans, root, inode);
		BUG_ON(ret);

		iput(inode);

		/*
		 * fixup on a directory may create new entries,
		 * make sure we always look for the highset possible
		 * offset
		 */
		key.offset = (u64)-1;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
1397
1398
1399 /*
1400  * record a given inode in the fixup dir so we can check its link
1401  * count when replay is done.  The link count is incremented here
1402  * so the inode won't go away until we check it
1403  */
1404 static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
1405                                       struct btrfs_root *root,
1406                                       struct btrfs_path *path,
1407                                       u64 objectid)
1408 {
1409         struct btrfs_key key;
1410         int ret = 0;
1411         struct inode *inode;
1412
1413         inode = read_one_inode(root, objectid);
1414         if (!inode)
1415                 return -EIO;
1416
1417         key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1418         btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
1419         key.offset = objectid;
1420
1421         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1422
1423         btrfs_release_path(path);
1424         if (ret == 0) {
1425                 if (!inode->i_nlink)
1426                         set_nlink(inode, 1);
1427                 else
1428                         btrfs_inc_nlink(inode);
1429                 ret = btrfs_update_inode(trans, root, inode);
1430         } else if (ret == -EEXIST) {
1431                 ret = 0;
1432         } else {
1433                 BUG();
1434         }
1435         iput(inode);
1436
1437         return ret;
1438 }
1439
1440 /*
1441  * when replaying the log for a directory, we only insert names
1442  * for inodes that actually exist.  This means an fsync on a directory
1443  * does not implicitly fsync all the new files in it
1444  */
1445 static noinline int insert_one_name(struct btrfs_trans_handle *trans,
1446                                     struct btrfs_root *root,
1447                                     struct btrfs_path *path,
1448                                     u64 dirid, u64 index,
1449                                     char *name, int name_len, u8 type,
1450                                     struct btrfs_key *location)
1451 {
1452         struct inode *inode;
1453         struct inode *dir;
1454         int ret;
1455
1456         inode = read_one_inode(root, location->objectid);
1457         if (!inode)
1458                 return -ENOENT;
1459
1460         dir = read_one_inode(root, dirid);
1461         if (!dir) {
1462                 iput(inode);
1463                 return -EIO;
1464         }
1465         ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);
1466
1467         /* FIXME, put inode into FIXUP list */
1468
1469         iput(inode);
1470         iput(dir);
1471         return ret;
1472 }
1473
1474 /*
1475  * take a single entry in a log directory item and replay it into
1476  * the subvolume.
1477  *
1478  * if a conflicting item exists in the subdirectory already,
1479  * the inode it points to is unlinked and put into the link count
1480  * fix up tree.
1481  *
1482  * If a name from the log points to a file or directory that does
1483  * not exist in the FS, it is skipped.  fsyncs on directories
1484  * do not force down inodes inside that directory, just changes to the
1485  * names or unlinks in a directory.
1486  */
1487 static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1488                                     struct btrfs_root *root,
1489                                     struct btrfs_path *path,
1490                                     struct extent_buffer *eb,
1491                                     struct btrfs_dir_item *di,
1492                                     struct btrfs_key *key)
1493 {
1494         char *name;
1495         int name_len;
1496         struct btrfs_dir_item *dst_di;
1497         struct btrfs_key found_key;
1498         struct btrfs_key log_key;
1499         struct inode *dir;
1500         u8 log_type;
1501         int exists;
1502         int ret;
1503
1504         dir = read_one_inode(root, key->objectid);
1505         if (!dir)
1506                 return -EIO;
1507
1508         name_len = btrfs_dir_name_len(eb, di);
1509         name = kmalloc(name_len, GFP_NOFS);
1510         if (!name)
1511                 return -ENOMEM;
1512
1513         log_type = btrfs_dir_type(eb, di);
1514         read_extent_buffer(eb, name, (unsigned long)(di + 1),
1515                    name_len);
1516
1517         btrfs_dir_item_key_to_cpu(eb, di, &log_key);
1518         exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
1519         if (exists == 0)
1520                 exists = 1;
1521         else
1522                 exists = 0;
1523         btrfs_release_path(path);
1524
1525         if (key->type == BTRFS_DIR_ITEM_KEY) {
1526                 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
1527                                        name, name_len, 1);
1528         } else if (key->type == BTRFS_DIR_INDEX_KEY) {
1529                 dst_di = btrfs_lookup_dir_index_item(trans, root, path,
1530                                                      key->objectid,
1531                                                      key->offset, name,
1532                                                      name_len, 1);
1533         } else {
1534                 BUG();
1535         }
1536         if (IS_ERR_OR_NULL(dst_di)) {
1537                 /* we need a sequence number to insert, so we only
1538                  * do inserts for the BTRFS_DIR_INDEX_KEY types
1539                  */
1540                 if (key->type != BTRFS_DIR_INDEX_KEY)
1541                         goto out;
1542                 goto insert;
1543         }
1544
1545         btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
1546         /* the existing item matches the logged item */
1547         if (found_key.objectid == log_key.objectid &&
1548             found_key.type == log_key.type &&
1549             found_key.offset == log_key.offset &&
1550             btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
1551                 goto out;
1552         }
1553
1554         /*
1555          * don't drop the conflicting directory entry if the inode
1556          * for the new entry doesn't exist
1557          */
1558         if (!exists)
1559                 goto out;
1560
1561         ret = drop_one_dir_item(trans, root, path, dir, dst_di);
1562         BUG_ON(ret);
1563
1564         if (key->type == BTRFS_DIR_INDEX_KEY)
1565                 goto insert;
1566 out:
1567         btrfs_release_path(path);
1568         kfree(name);
1569         iput(dir);
1570         return 0;
1571
1572 insert:
1573         btrfs_release_path(path);
1574         ret = insert_one_name(trans, root, path, key->objectid, key->offset,
1575                               name, name_len, log_type, &log_key);
1576
1577         BUG_ON(ret && ret != -ENOENT);
1578         goto out;
1579 }
1580
1581 /*
1582  * find all the names in a directory item and reconcile them into
1583  * the subvolume.  Only BTRFS_DIR_ITEM_KEY types will have more than
1584  * one name in a directory item, but the same code gets used for
1585  * both directory index types
1586  */
1587 static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
1588                                         struct btrfs_root *root,
1589                                         struct btrfs_path *path,
1590                                         struct extent_buffer *eb, int slot,
1591                                         struct btrfs_key *key)
1592 {
1593         int ret;
1594         u32 item_size = btrfs_item_size_nr(eb, slot);
1595         struct btrfs_dir_item *di;
1596         int name_len;
1597         unsigned long ptr;
1598         unsigned long ptr_end;
1599
1600         ptr = btrfs_item_ptr_offset(eb, slot);
1601         ptr_end = ptr + item_size;
1602         while (ptr < ptr_end) {
1603                 di = (struct btrfs_dir_item *)ptr;
1604                 if (verify_dir_item(root, eb, di))
1605                         return -EIO;
1606                 name_len = btrfs_dir_name_len(eb, di);
1607                 ret = replay_one_name(trans, root, path, eb, di, key);
1608                 BUG_ON(ret);
1609                 ptr = (unsigned long)(di + 1);
1610                 ptr += name_len;
1611         }
1612         return 0;
1613 }
1614
/*
 * directory replay has two parts.  There are the standard directory
 * items in the log copied from the subvolume, and range items
 * created in the log while the subvolume was logged.
 *
 * The range items tell us which parts of the key space the log
 * is authoritative for.  During replay, if a key in the subvolume
 * directory is in a logged range item, but not actually in the log
 * that means it was deleted from the directory before the fsync
 * and should be removed.
 *
 * Returns 0 and fills *start_ret/*end_ret when a range covering or
 * following *start_ret is found, 1 when there are no more ranges,
 * and a negative errno on search failure.  The path is always
 * released before returning.
 */
static noinline int find_dir_range(struct btrfs_root *root,
				   struct btrfs_path *path,
				   u64 dirid, int key_type,
				   u64 *start_ret, u64 *end_ret)
{
	struct btrfs_key key;
	u64 found_end;
	struct btrfs_dir_log_item *item;
	int ret;
	int nritems;

	/* (u64)-1 is the caller's "already consumed everything" marker */
	if (*start_ret == (u64)-1)
		return 1;

	key.objectid = dirid;
	key.type = key_type;
	key.offset = *start_ret;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		/*
		 * no exact match; back up one slot so we can check the
		 * previous range item, which may still cover *start_ret.
		 * If we're at slot 0 there is nothing before us.
		 */
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}
	if (ret != 0)
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto next;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);

	/* does this range item [key.offset, found_end] cover *start_ret? */
	if (*start_ret >= key.offset && *start_ret <= found_end) {
		ret = 0;
		*start_ret = key.offset;
		*end_ret = found_end;
		goto out;
	}
	ret = 1;
next:
	/* check the next slot in the tree to see if it is a valid item */
	nritems = btrfs_header_nritems(path->nodes[0]);
	if (path->slots[0] >= nritems) {
		ret = btrfs_next_leaf(root, path);
		if (ret)
			goto out;
	} else {
		path->slots[0]++;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto out;
	}
	/* found the next range item; report its span to the caller */
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);
	*start_ret = key.offset;
	*end_ret = found_end;
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
1697
1698 /*
1699  * this looks for a given directory item in the log.  If the directory
1700  * item is not in the log, the item is removed and the inode it points
1701  * to is unlinked
1702  */
1703 static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
1704                                       struct btrfs_root *root,
1705                                       struct btrfs_root *log,
1706                                       struct btrfs_path *path,
1707                                       struct btrfs_path *log_path,
1708                                       struct inode *dir,
1709                                       struct btrfs_key *dir_key)
1710 {
1711         int ret;
1712         struct extent_buffer *eb;
1713         int slot;
1714         u32 item_size;
1715         struct btrfs_dir_item *di;
1716         struct btrfs_dir_item *log_di;
1717         int name_len;
1718         unsigned long ptr;
1719         unsigned long ptr_end;
1720         char *name;
1721         struct inode *inode;
1722         struct btrfs_key location;
1723
1724 again:
1725         eb = path->nodes[0];
1726         slot = path->slots[0];
1727         item_size = btrfs_item_size_nr(eb, slot);
1728         ptr = btrfs_item_ptr_offset(eb, slot);
1729         ptr_end = ptr + item_size;
1730         while (ptr < ptr_end) {
1731                 di = (struct btrfs_dir_item *)ptr;
1732                 if (verify_dir_item(root, eb, di)) {
1733                         ret = -EIO;
1734                         goto out;
1735                 }
1736
1737                 name_len = btrfs_dir_name_len(eb, di);
1738                 name = kmalloc(name_len, GFP_NOFS);
1739                 if (!name) {
1740                         ret = -ENOMEM;
1741                         goto out;
1742                 }
1743                 read_extent_buffer(eb, name, (unsigned long)(di + 1),
1744                                   name_len);
1745                 log_di = NULL;
1746                 if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
1747                         log_di = btrfs_lookup_dir_item(trans, log, log_path,
1748                                                        dir_key->objectid,
1749                                                        name, name_len, 0);
1750                 } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
1751                         log_di = btrfs_lookup_dir_index_item(trans, log,
1752                                                      log_path,
1753                                                      dir_key->objectid,
1754                                                      dir_key->offset,
1755                                                      name, name_len, 0);
1756                 }
1757                 if (IS_ERR_OR_NULL(log_di)) {
1758                         btrfs_dir_item_key_to_cpu(eb, di, &location);
1759                         btrfs_release_path(path);
1760                         btrfs_release_path(log_path);
1761                         inode = read_one_inode(root, location.objectid);
1762                         if (!inode) {
1763                                 kfree(name);
1764                                 return -EIO;
1765                         }
1766
1767                         ret = link_to_fixup_dir(trans, root,
1768                                                 path, location.objectid);
1769                         BUG_ON(ret);
1770                         btrfs_inc_nlink(inode);
1771                         ret = btrfs_unlink_inode(trans, root, dir, inode,
1772                                                  name, name_len);
1773                         BUG_ON(ret);
1774
1775                         btrfs_run_delayed_items(trans, root);
1776
1777                         kfree(name);
1778                         iput(inode);
1779
1780                         /* there might still be more names under this key
1781                          * check and repeat if required
1782                          */
1783                         ret = btrfs_search_slot(NULL, root, dir_key, path,
1784                                                 0, 0);
1785                         if (ret == 0)
1786                                 goto again;
1787                         ret = 0;
1788                         goto out;
1789                 }
1790                 btrfs_release_path(log_path);
1791                 kfree(name);
1792
1793                 ptr = (unsigned long)(di + 1);
1794                 ptr += name_len;
1795         }
1796         ret = 0;
1797 out:
1798         btrfs_release_path(path);
1799         btrfs_release_path(log_path);
1800         return ret;
1801 }
1802
1803 /*
1804  * deletion replay happens before we copy any new directory items
1805  * out of the log or out of backreferences from inodes.  It
1806  * scans the log to find ranges of keys that log is authoritative for,
1807  * and then scans the directory to find items in those ranges that are
1808  * not present in the log.
1809  *
1810  * Anything we don't find in the log is unlinked and removed from the
1811  * directory.
1812  */
1813 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
1814                                        struct btrfs_root *root,
1815                                        struct btrfs_root *log,
1816                                        struct btrfs_path *path,
1817                                        u64 dirid, int del_all)
1818 {
1819         u64 range_start;
1820         u64 range_end;
1821         int key_type = BTRFS_DIR_LOG_ITEM_KEY;
1822         int ret = 0;
1823         struct btrfs_key dir_key;
1824         struct btrfs_key found_key;
1825         struct btrfs_path *log_path;
1826         struct inode *dir;
1827
1828         dir_key.objectid = dirid;
1829         dir_key.type = BTRFS_DIR_ITEM_KEY;
1830         log_path = btrfs_alloc_path();
1831         if (!log_path)
1832                 return -ENOMEM;
1833
1834         dir = read_one_inode(root, dirid);
1835         /* it isn't an error if the inode isn't there, that can happen
1836          * because we replay the deletes before we copy in the inode item
1837          * from the log
1838          */
1839         if (!dir) {
1840                 btrfs_free_path(log_path);
1841                 return 0;
1842         }
1843 again:
1844         range_start = 0;
1845         range_end = 0;
1846         while (1) {
1847                 if (del_all)
1848                         range_end = (u64)-1;
1849                 else {
1850                         ret = find_dir_range(log, path, dirid, key_type,
1851                                              &range_start, &range_end);
1852                         if (ret != 0)
1853                                 break;
1854                 }
1855
1856                 dir_key.offset = range_start;
1857                 while (1) {
1858                         int nritems;
1859                         ret = btrfs_search_slot(NULL, root, &dir_key, path,
1860                                                 0, 0);
1861                         if (ret < 0)
1862                                 goto out;
1863
1864                         nritems = btrfs_header_nritems(path->nodes[0]);
1865                         if (path->slots[0] >= nritems) {
1866                                 ret = btrfs_next_leaf(root, path);
1867                                 if (ret)
1868                                         break;
1869                         }
1870                         btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1871                                               path->slots[0]);
1872                         if (found_key.objectid != dirid ||
1873                             found_key.type != dir_key.type)
1874                                 goto next_type;
1875
1876                         if (found_key.offset > range_end)
1877                                 break;
1878
1879                         ret = check_item_in_log(trans, root, log, path,
1880                                                 log_path, dir,
1881                                                 &found_key);
1882                         BUG_ON(ret);
1883                         if (found_key.offset == (u64)-1)
1884                                 break;
1885                         dir_key.offset = found_key.offset + 1;
1886                 }
1887                 btrfs_release_path(path);
1888                 if (range_end == (u64)-1)
1889                         break;
1890                 range_start = range_end + 1;
1891         }
1892
1893 next_type:
1894         ret = 0;
1895         if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
1896                 key_type = BTRFS_DIR_LOG_INDEX_KEY;
1897                 dir_key.type = BTRFS_DIR_INDEX_KEY;
1898                 btrfs_release_path(path);
1899                 goto again;
1900         }
1901 out:
1902         btrfs_release_path(path);
1903         btrfs_free_path(log_path);
1904         iput(dir);
1905         return ret;
1906 }
1907
/*
 * the process_func used to replay items from the log tree.  This
 * gets called in two different stages.  The first stage just looks
 * for inodes and makes sure they are all copied into the subvolume.
 *
 * The second stage copies all the other item types from the log into
 * the subvolume.  The two stage approach is slower, but gets rid of
 * lots of complexity around inodes referencing other inodes that exist
 * only in the log (references come from either directory items or inode
 * back refs).
 */
static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
			     struct walk_control *wc, u64 gen)
{
	int nritems;
	struct btrfs_path *path;
	struct btrfs_root *root = wc->replay_dest;
	struct btrfs_key key;
	int level;
	int i;
	int ret;

	ret = btrfs_read_buffer(eb, gen);
	if (ret)
		return ret;

	level = btrfs_header_level(eb);

	/* only leaves carry items to replay */
	if (level != 0)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	nritems = btrfs_header_nritems(eb);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		/* inode keys are done during the first stage */
		if (key.type == BTRFS_INODE_ITEM_KEY &&
		    wc->stage == LOG_WALK_REPLAY_INODES) {
			struct btrfs_inode_item *inode_item;
			u32 mode;

			inode_item = btrfs_item_ptr(eb, i,
					    struct btrfs_inode_item);
			mode = btrfs_inode_mode(eb, inode_item);
			/*
			 * for directories, purge entries deleted before the
			 * fsync, before the inode item overwrites the dir
			 */
			if (S_ISDIR(mode)) {
				ret = replay_dir_deletes(wc->trans,
					 root, log, path, key.objectid, 0);
				if (ret)
					break;
			}
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			if (ret)
				break;

			/* for regular files, make sure corresponding
			 * orphan item exist. extents past the new EOF
			 * will be truncated later by orphan cleanup.
			 */
			if (S_ISREG(mode)) {
				ret = insert_orphan_item(wc->trans, root,
							 key.objectid);
				if (ret)
					break;
			}

			ret = link_to_fixup_dir(wc->trans, root,
						path, key.objectid);
			if (ret)
				break;
		}
		/* everything below only runs during the second stage */
		if (wc->stage < LOG_WALK_REPLAY_ALL)
			continue;

		/* these keys are simply copied */
		if (key.type == BTRFS_XATTR_ITEM_KEY) {
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			if (ret)
				break;
		} else if (key.type == BTRFS_INODE_REF_KEY) {
			/* -ENOENT means the ref'd name is gone; not fatal */
			ret = add_inode_ref(wc->trans, root, log, path,
					    eb, i, &key);
			if (ret && ret != -ENOENT)
				break;
			ret = 0;
		} else if (key.type == BTRFS_INODE_EXTREF_KEY) {
			ret = add_inode_ref(wc->trans, root, log, path,
					    eb, i, &key);
			if (ret && ret != -ENOENT)
				break;
			ret = 0;
		} else if (key.type == BTRFS_EXTENT_DATA_KEY) {
			ret = replay_one_extent(wc->trans, root, path,
						eb, i, &key);
			if (ret)
				break;
		} else if (key.type == BTRFS_DIR_ITEM_KEY ||
			   key.type == BTRFS_DIR_INDEX_KEY) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			if (ret)
				break;
		}
	}
	btrfs_free_path(path);
	return ret;
}
2020
/*
 * walk down the log tree from path/*level toward the leaves, calling
 * wc->process_func on each level-1 child.  If wc->free is set, processed
 * blocks are cleaned and their reserved extents freed and pinned.
 * Returns 0 when this subtree is exhausted or a negative errno.
 */
static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path, int *level,
				   struct walk_control *wc)
{
	u64 root_owner;
	u64 bytenr;
	u64 ptr_gen;
	struct extent_buffer *next;
	struct extent_buffer *cur;
	struct extent_buffer *parent;
	u32 blocksize;
	int ret = 0;

	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	while (*level > 0) {
		WARN_ON(*level < 0);
		WARN_ON(*level >= BTRFS_MAX_LEVEL);
		cur = path->nodes[*level];

		if (btrfs_header_level(cur) != *level)
			WARN_ON(1);

		/* this node is exhausted; let the caller walk back up */
		if (path->slots[*level] >=
		    btrfs_header_nritems(cur))
			break;

		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
		blocksize = btrfs_level_size(root, *level - 1);

		parent = path->nodes[*level];
		root_owner = btrfs_header_owner(parent);

		next = btrfs_find_create_tree_block(root, bytenr, blocksize);
		if (!next)
			return -ENOMEM;

		if (*level == 1) {
			/* children of a level-1 node are leaves; process
			 * them here instead of descending into them
			 */
			ret = wc->process_func(root, next, wc, ptr_gen);
			if (ret) {
				free_extent_buffer(next);
				return ret;
			}

			path->slots[*level]++;
			if (wc->free) {
				ret = btrfs_read_buffer(next, ptr_gen);
				if (ret) {
					free_extent_buffer(next);
					return ret;
				}

				/* lock, clean and wait before releasing the
				 * block's reserved extent back to the
				 * allocator
				 */
				btrfs_tree_lock(next);
				btrfs_set_lock_blocking(next);
				clean_tree_block(trans, root, next);
				btrfs_wait_tree_block_writeback(next);
				btrfs_tree_unlock(next);

				WARN_ON(root_owner !=
					BTRFS_TREE_LOG_OBJECTID);
				ret = btrfs_free_and_pin_reserved_extent(root,
							 bytenr, blocksize);
				BUG_ON(ret); /* -ENOMEM or logic errors */
			}
			free_extent_buffer(next);
			continue;
		}
		ret = btrfs_read_buffer(next, ptr_gen);
		if (ret) {
			free_extent_buffer(next);
			return ret;
		}

		/* descend one level into 'next' */
		WARN_ON(*level <= 0);
		if (path->nodes[*level-1])
			free_extent_buffer(path->nodes[*level-1]);
		path->nodes[*level-1] = next;
		*level = btrfs_header_level(next);
		path->slots[*level] = 0;
		cond_resched();
	}
	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);

	cond_resched();
	return 0;
}
2113
/*
 * walk back up the log tree from path/*level, processing (and, when
 * wc->free is set, freeing) each fully-visited node.  Returns 0 when a
 * new slot to descend into was found, 1 when the whole tree has been
 * consumed, or a negative errno from wc->process_func.
 */
static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path, int *level,
				 struct walk_control *wc)
{
	u64 root_owner;
	int i;
	int slot;
	int ret;

	for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
		slot = path->slots[i];
		if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
			/* more siblings at this level; resume walking down */
			path->slots[i]++;
			*level = i;
			WARN_ON(*level == 0);
			return 0;
		} else {
			struct extent_buffer *parent;
			/* the root node is its own parent */
			if (path->nodes[*level] == root->node)
				parent = path->nodes[*level];
			else
				parent = path->nodes[*level + 1];

			root_owner = btrfs_header_owner(parent);
			ret = wc->process_func(root, path->nodes[*level], wc,
				 btrfs_header_generation(path->nodes[*level]));
			if (ret)
				return ret;

			if (wc->free) {
				struct extent_buffer *next;

				next = path->nodes[*level];

				/* flush the block before giving back its
				 * reserved extent
				 */
				btrfs_tree_lock(next);
				btrfs_set_lock_blocking(next);
				clean_tree_block(trans, root, next);
				btrfs_wait_tree_block_writeback(next);
				btrfs_tree_unlock(next);

				WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
				ret = btrfs_free_and_pin_reserved_extent(root,
						path->nodes[*level]->start,
						path->nodes[*level]->len);
				BUG_ON(ret);
			}
			free_extent_buffer(path->nodes[*level]);
			path->nodes[*level] = NULL;
			*level = i + 1;
		}
	}
	return 1;
}
2168
/*
 * drop the reference count on the tree rooted at 'snap'.  This traverses
 * the tree freeing any blocks that have a ref count of zero after being
 * decremented.
 *
 * Alternates walk_down_log_tree/walk_up_log_tree until the whole log is
 * consumed, then handles the root node itself (the walkers only process
 * children).  Returns 0 on success or a negative errno.
 */
static int walk_log_tree(struct btrfs_trans_handle *trans,
			 struct btrfs_root *log, struct walk_control *wc)
{
	int ret = 0;
	int wret;
	int level;
	struct btrfs_path *path;
	int orig_level;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* start the walk at the log root; take a ref for the path */
	level = btrfs_header_level(log->node);
	orig_level = level;
	path->nodes[level] = log->node;
	extent_buffer_get(log->node);
	path->slots[level] = 0;

	while (1) {
		wret = walk_down_log_tree(trans, log, path, &level, wc);
		if (wret > 0)
			break;
		if (wret < 0) {
			ret = wret;
			goto out;
		}

		wret = walk_up_log_tree(trans, log, path, &level, wc);
		if (wret > 0)
			break;
		if (wret < 0) {
			ret = wret;
			goto out;
		}
	}

	/* was the root node processed? if not, catch it here */
	if (path->nodes[orig_level]) {
		ret = wc->process_func(log, path->nodes[orig_level], wc,
			 btrfs_header_generation(path->nodes[orig_level]));
		if (ret)
			goto out;
		if (wc->free) {
			struct extent_buffer *next;

			next = path->nodes[orig_level];

			/* flush the root block and reclaim its extent,
			 * same as the walkers do for interior nodes
			 */
			btrfs_tree_lock(next);
			btrfs_set_lock_blocking(next);
			clean_tree_block(trans, log, next);
			btrfs_wait_tree_block_writeback(next);
			btrfs_tree_unlock(next);

			WARN_ON(log->root_key.objectid !=
				BTRFS_TREE_LOG_OBJECTID);
			ret = btrfs_free_and_pin_reserved_extent(log, next->start,
							 next->len);
			BUG_ON(ret); /* -ENOMEM or logic errors */
		}
	}

out:
	btrfs_free_path(path);
	return ret;
}
2240
2241 /*
2242  * helper function to update the item for a given subvolumes log root
2243  * in the tree of log roots
2244  */
2245 static int update_log_root(struct btrfs_trans_handle *trans,
2246                            struct btrfs_root *log)
2247 {
2248         int ret;
2249
2250         if (log->log_transid == 1) {
2251                 /* insert root item on the first sync */
2252                 ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
2253                                 &log->root_key, &log->root_item);
2254         } else {
2255                 ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
2256                                 &log->root_key, &log->root_item);
2257         }
2258         return ret;
2259 }
2260
/*
 * wait (with log_mutex held on entry and exit) until the log commit with
 * the given transid finishes, a full transaction commit is forced, or the
 * log has moved two transactions past us.  Uses the classic
 * prepare_to_wait/recheck/schedule pattern around dropping log_mutex.
 */
static int wait_log_commit(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, unsigned long transid)
{
	DEFINE_WAIT(wait);
	/* two log transactions ping-pong between slots 0 and 1 */
	int index = transid % 2;

	/*
	 * we only allow two pending log transactions at a time,
	 * so we know that if ours is more than 2 older than the
	 * current transaction, we're done
	 */
	do {
		/* queue ourselves before unlocking to avoid missed wakeups */
		prepare_to_wait(&root->log_commit_wait[index],
				&wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&root->log_mutex);

		if (root->fs_info->last_trans_log_full_commit !=
		    trans->transid && root->log_transid < transid + 2 &&
		    atomic_read(&root->log_commit[index]))
			schedule();

		finish_wait(&root->log_commit_wait[index], &wait);
		mutex_lock(&root->log_mutex);
	} while (root->fs_info->last_trans_log_full_commit !=
		 trans->transid && root->log_transid < transid + 2 &&
		 atomic_read(&root->log_commit[index]));
	return 0;
}
2289
/*
 * wait (log_mutex held on entry and exit) until there are no more active
 * log writers for this root, or a full transaction commit is forced.
 */
static void wait_for_writer(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	DEFINE_WAIT(wait);
	while (root->fs_info->last_trans_log_full_commit !=
	       trans->transid && atomic_read(&root->log_writers)) {
		/* queue before dropping log_mutex so wakeups aren't lost */
		prepare_to_wait(&root->log_writer_wait,
				&wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&root->log_mutex);
		/* recheck the condition after unlocking, before sleeping */
		if (root->fs_info->last_trans_log_full_commit !=
		    trans->transid && atomic_read(&root->log_writers))
			schedule();
		mutex_lock(&root->log_mutex);
		finish_wait(&root->log_writer_wait, &wait);
	}
}
2306
2307 /*
 * btrfs_sync_log sends a given tree log down to the disk and
2309  * updates the super blocks to record it.  When this call is done,
2310  * you know that any inodes previously logged are safely on disk only
2311  * if it returns 0.
2312  *
2313  * Any other return value means you need to call btrfs_commit_transaction.
2314  * Some of the edge cases for fsyncing directories that have had unlinks
2315  * or renames done in the past mean that sometimes the only safe
2316  * fsync is to commit the whole FS.  When btrfs_sync_log returns -EAGAIN,
2317  * that has happened.
2318  */
int btrfs_sync_log(struct btrfs_trans_handle *trans,
		   struct btrfs_root *root)
{
	int index1;
	int index2;
	int mark;
	int ret;
	struct btrfs_root *log = root->log_root;
	struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
	unsigned long log_transid = 0;

	mutex_lock(&root->log_mutex);
	log_transid = root->log_transid;
	index1 = root->log_transid % 2;
	/* somebody else is already committing this log transid: just wait */
	if (atomic_read(&root->log_commit[index1])) {
		wait_log_commit(trans, root, root->log_transid);
		mutex_unlock(&root->log_mutex);
		return 0;
	}
	atomic_set(&root->log_commit[index1], 1);

	/* wait for previous tree log sync to complete */
	if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
		wait_log_commit(trans, root, root->log_transid - 1);
	/* batch up concurrent log writers so one commit covers them all */
	while (1) {
		int batch = atomic_read(&root->log_batch);
		/* when we're on an ssd, just kick the log commit out */
		if (!btrfs_test_opt(root, SSD) && root->log_multiple_pids) {
			mutex_unlock(&root->log_mutex);
			schedule_timeout_uninterruptible(1);
			mutex_lock(&root->log_mutex);
		}
		wait_for_writer(trans, root);
		if (batch == atomic_read(&root->log_batch))
			break;
	}

	/* bail out if we need to do a full commit */
	if (root->fs_info->last_trans_log_full_commit == trans->transid) {
		ret = -EAGAIN;
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&root->log_mutex);
		goto out;
	}

	/* the two in-flight log transids use different extent marks */
	if (log_transid % 2 == 0)
		mark = EXTENT_DIRTY;
	else
		mark = EXTENT_NEW;

	/* we start IO on  all the marked extents here, but we don't actually
	 * wait for them until later.
	 */
	ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&root->log_mutex);
		goto out;
	}

	btrfs_set_root_node(&log->root_item, log->node);

	root->log_transid++;
	log->log_transid = root->log_transid;
	root->log_start_pid = 0;
	smp_mb();
	/*
	 * IO has been started, blocks of the log tree have WRITTEN flag set
	 * in their headers. new modifications of the log will be written to
	 * new positions. so it's safe to allow log writers to go in.
	 */
	mutex_unlock(&root->log_mutex);

	/* register as a writer of the log root tree before updating it */
	mutex_lock(&log_root_tree->log_mutex);
	atomic_inc(&log_root_tree->log_batch);
	atomic_inc(&log_root_tree->log_writers);
	mutex_unlock(&log_root_tree->log_mutex);

	ret = update_log_root(trans, log);

	mutex_lock(&log_root_tree->log_mutex);
	if (atomic_dec_and_test(&log_root_tree->log_writers)) {
		smp_mb();
		if (waitqueue_active(&log_root_tree->log_writer_wait))
			wake_up(&log_root_tree->log_writer_wait);
	}

	if (ret) {
		if (ret != -ENOSPC) {
			btrfs_abort_transaction(trans, root, ret);
			mutex_unlock(&log_root_tree->log_mutex);
			goto out;
		}
		/* out of space in the log: fall back to a full transaction commit */
		root->fs_info->last_trans_log_full_commit = trans->transid;
		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = -EAGAIN;
		goto out;
	}

	/* someone else is already committing the log root tree: wait for them */
	index2 = log_root_tree->log_transid % 2;
	if (atomic_read(&log_root_tree->log_commit[index2])) {
		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
		wait_log_commit(trans, log_root_tree,
				log_root_tree->log_transid);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = 0;
		goto out;
	}
	atomic_set(&log_root_tree->log_commit[index2], 1);

	if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
		wait_log_commit(trans, log_root_tree,
				log_root_tree->log_transid - 1);
	}

	wait_for_writer(trans, log_root_tree);

	/*
	 * now that we've moved on to the tree of log tree roots,
	 * check the full commit flag again
	 */
	if (root->fs_info->last_trans_log_full_commit == trans->transid) {
		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = -EAGAIN;
		goto out_wake_log_root;
	}

	ret = btrfs_write_and_wait_marked_extents(log_root_tree,
				&log_root_tree->dirty_log_pages,
				EXTENT_DIRTY | EXTENT_NEW);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		goto out_wake_log_root;
	}
	btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
	btrfs_wait_logged_extents(log, log_transid);

	/* point the super block at the freshly written log root tree */
	btrfs_set_super_log_root(root->fs_info->super_for_commit,
				log_root_tree->node->start);
	btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
				btrfs_header_level(log_root_tree->node));

	log_root_tree->log_transid++;
	smp_mb();

	mutex_unlock(&log_root_tree->log_mutex);

	/*
	 * nobody else is going to jump in and write the ctree
	 * super here because the log_commit atomic below is protecting
	 * us.  We must be called with a transaction handle pinning
	 * the running transaction open, so a full commit can't hop
	 * in and cause problems either.
	 */
	btrfs_scrub_pause_super(root);
	ret = write_ctree_super(trans, root->fs_info->tree_root, 1);
	btrfs_scrub_continue_super(root);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out_wake_log_root;
	}

	/* remember the newest committed log transid for this root */
	mutex_lock(&root->log_mutex);
	if (root->last_log_commit < log_transid)
		root->last_log_commit = log_transid;
	mutex_unlock(&root->log_mutex);

out_wake_log_root:
	atomic_set(&log_root_tree->log_commit[index2], 0);
	smp_mb();
	if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
		wake_up(&log_root_tree->log_commit_wait[index2]);
out:
	atomic_set(&root->log_commit[index1], 0);
	smp_mb();
	if (waitqueue_active(&root->log_commit_wait[index1]))
		wake_up(&root->log_commit_wait[index1]);
	return ret;
}
2506
2507 static void free_log_tree(struct btrfs_trans_handle *trans,
2508                           struct btrfs_root *log)
2509 {
2510         int ret;
2511         u64 start;
2512         u64 end;
2513         struct walk_control wc = {
2514                 .free = 1,
2515                 .process_func = process_one_buffer
2516         };
2517
2518         if (trans) {
2519                 ret = walk_log_tree(trans, log, &wc);
2520                 BUG_ON(ret);
2521         }
2522
2523         while (1) {
2524                 ret = find_first_extent_bit(&log->dirty_log_pages,
2525                                 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
2526                                 NULL);
2527                 if (ret)
2528                         break;
2529
2530                 clear_extent_bits(&log->dirty_log_pages, start, end,
2531                                   EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
2532         }
2533
2534         /*
2535          * We may have short-circuited the log tree with the full commit logic
2536          * and left ordered extents on our list, so clear these out to keep us
2537          * from leaking inodes and memory.
2538          */
2539         btrfs_free_logged_extents(log, 0);
2540         btrfs_free_logged_extents(log, 1);
2541
2542         free_extent_buffer(log->node);
2543         kfree(log);
2544 }
2545
2546 /*
2547  * free all the extents used by the tree log.  This should be called
2548  * at commit time of the full transaction
2549  */
2550 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
2551 {
2552         if (root->log_root) {
2553                 free_log_tree(trans, root->log_root);
2554                 root->log_root = NULL;
2555         }
2556         return 0;
2557 }
2558
2559 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
2560                              struct btrfs_fs_info *fs_info)
2561 {
2562         if (fs_info->log_root_tree) {
2563                 free_log_tree(trans, fs_info->log_root_tree);
2564                 fs_info->log_root_tree = NULL;
2565         }
2566         return 0;
2567 }
2568
2569 /*
2570  * If both a file and directory are logged, and unlinks or renames are
2571  * mixed in, we have a few interesting corners:
2572  *
2573  * create file X in dir Y
2574  * link file X to X.link in dir Y
2575  * fsync file X
2576  * unlink file X but leave X.link
2577  * fsync dir Y
2578  *
2579  * After a crash we would expect only X.link to exist.  But file X
2580  * didn't get fsync'd again so the log has back refs for X and X.link.
2581  *
2582  * We solve this by removing directory entries and inode backrefs from the
2583  * log when a file that was logged in the current transaction is
2584  * unlinked.  Any later fsync will include the updated log entries, and
2585  * we'll be able to reconstruct the proper directory items from backrefs.
2586  *
 * This optimization allows us to avoid relogging the entire inode
2588  * or the entire directory.
2589  */
2590 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
2591                                  struct btrfs_root *root,
2592                                  const char *name, int name_len,
2593                                  struct inode *dir, u64 index)
2594 {
2595         struct btrfs_root *log;
2596         struct btrfs_dir_item *di;
2597         struct btrfs_path *path;
2598         int ret;
2599         int err = 0;
2600         int bytes_del = 0;
2601         u64 dir_ino = btrfs_ino(dir);
2602
2603         if (BTRFS_I(dir)->logged_trans < trans->transid)
2604                 return 0;
2605
2606         ret = join_running_log_trans(root);
2607         if (ret)
2608                 return 0;
2609
2610         mutex_lock(&BTRFS_I(dir)->log_mutex);
2611
2612         log = root->log_root;
2613         path = btrfs_alloc_path();
2614         if (!path) {
2615                 err = -ENOMEM;
2616                 goto out_unlock;
2617         }
2618
2619         di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
2620                                    name, name_len, -1);
2621         if (IS_ERR(di)) {
2622                 err = PTR_ERR(di);
2623                 goto fail;
2624         }
2625         if (di) {
2626                 ret = btrfs_delete_one_dir_name(trans, log, path, di);
2627                 bytes_del += name_len;
2628                 BUG_ON(ret);
2629         }
2630         btrfs_release_path(path);
2631         di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
2632                                          index, name, name_len, -1);
2633         if (IS_ERR(di)) {
2634                 err = PTR_ERR(di);
2635                 goto fail;
2636         }
2637         if (di) {
2638                 ret = btrfs_delete_one_dir_name(trans, log, path, di);
2639                 bytes_del += name_len;
2640                 BUG_ON(ret);
2641         }
2642
2643         /* update the directory size in the log to reflect the names
2644          * we have removed
2645          */
2646         if (bytes_del) {
2647                 struct btrfs_key key;
2648
2649                 key.objectid = dir_ino;
2650                 key.offset = 0;
2651                 key.type = BTRFS_INODE_ITEM_KEY;
2652                 btrfs_release_path(path);
2653
2654                 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
2655                 if (ret < 0) {
2656                         err = ret;
2657                         goto fail;
2658                 }
2659                 if (ret == 0) {
2660                         struct btrfs_inode_item *item;
2661                         u64 i_size;
2662
2663                         item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2664                                               struct btrfs_inode_item);
2665                         i_size = btrfs_inode_size(path->nodes[0], item);
2666                         if (i_size > bytes_del)
2667                                 i_size -= bytes_del;
2668                         else
2669                                 i_size = 0;
2670                         btrfs_set_inode_size(path->nodes[0], item, i_size);
2671                         btrfs_mark_buffer_dirty(path->nodes[0]);
2672                 } else
2673                         ret = 0;
2674                 btrfs_release_path(path);
2675         }
2676 fail:
2677         btrfs_free_path(path);
2678 out_unlock:
2679         mutex_unlock(&BTRFS_I(dir)->log_mutex);
2680         if (ret == -ENOSPC) {
2681                 root->fs_info->last_trans_log_full_commit = trans->transid;
2682                 ret = 0;
2683         } else if (ret < 0)
2684                 btrfs_abort_transaction(trans, root, ret);
2685
2686         btrfs_end_log_trans(root);
2687
2688         return err;
2689 }
2690
2691 /* see comments for btrfs_del_dir_entries_in_log */
2692 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
2693                                struct btrfs_root *root,
2694                                const char *name, int name_len,
2695                                struct inode *inode, u64 dirid)
2696 {
2697         struct btrfs_root *log;
2698         u64 index;
2699         int ret;
2700
2701         if (BTRFS_I(inode)->logged_trans < trans->transid)
2702                 return 0;
2703
2704         ret = join_running_log_trans(root);
2705         if (ret)
2706                 return 0;
2707         log = root->log_root;
2708         mutex_lock(&BTRFS_I(inode)->log_mutex);
2709
2710         ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
2711                                   dirid, &index);
2712         mutex_unlock(&BTRFS_I(inode)->log_mutex);
2713         if (ret == -ENOSPC) {
2714                 root->fs_info->last_trans_log_full_commit = trans->transid;
2715                 ret = 0;
2716         } else if (ret < 0 && ret != -ENOENT)
2717                 btrfs_abort_transaction(trans, root, ret);
2718         btrfs_end_log_trans(root);
2719
2720         return ret;
2721 }
2722
2723 /*
2724  * creates a range item in the log for 'dirid'.  first_offset and
2725  * last_offset tell us which parts of the key space the log should
2726  * be considered authoritative for.
2727  */
2728 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
2729                                        struct btrfs_root *log,
2730                                        struct btrfs_path *path,
2731                                        int key_type, u64 dirid,
2732                                        u64 first_offset, u64 last_offset)
2733 {
2734         int ret;
2735         struct btrfs_key key;
2736         struct btrfs_dir_log_item *item;
2737
2738         key.objectid = dirid;
2739         key.offset = first_offset;
2740         if (key_type == BTRFS_DIR_ITEM_KEY)
2741                 key.type = BTRFS_DIR_LOG_ITEM_KEY;
2742         else
2743                 key.type = BTRFS_DIR_LOG_INDEX_KEY;
2744         ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
2745         if (ret)
2746                 return ret;
2747
2748         item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2749                               struct btrfs_dir_log_item);
2750         btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
2751         btrfs_mark_buffer_dirty(path->nodes[0]);
2752         btrfs_release_path(path);
2753         return 0;
2754 }
2755
2756 /*
2757  * log all the items included in the current transaction for a given
2758  * directory.  This also creates the range items in the log tree required
2759  * to replay anything deleted before the fsync
2760  */
static noinline int log_dir_items(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct inode *inode,
			  struct btrfs_path *path,
			  struct btrfs_path *dst_path, int key_type,
			  u64 min_offset, u64 *last_offset_ret)
{
	struct btrfs_key min_key;
	struct btrfs_key max_key;
	struct btrfs_root *log = root->log_root;
	struct extent_buffer *src;
	int err = 0;
	int ret;
	int i;
	int nritems;
	u64 first_offset = min_offset;
	/* (u64)-1 means the log is authoritative all the way to the end */
	u64 last_offset = (u64)-1;
	u64 ino = btrfs_ino(inode);

	/* NOTE(review): redundant re-assignment, log already set above */
	log = root->log_root;
	max_key.objectid = ino;
	max_key.offset = (u64)-1;
	max_key.type = key_type;

	min_key.objectid = ino;
	min_key.type = key_type;
	min_key.offset = min_offset;

	path->keep_locks = 1;

	/* find the first key of this type changed in the current transaction */
	ret = btrfs_search_forward(root, &min_key, &max_key,
				   path, trans->transid);

	/*
	 * we didn't find anything from this transaction, see if there
	 * is anything at all
	 */
	if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
		min_key.objectid = ino;
		min_key.type = key_type;
		min_key.offset = (u64)-1;
		btrfs_release_path(path);
		ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
		if (ret < 0) {
			btrfs_release_path(path);
			return ret;
		}
		ret = btrfs_previous_item(root, path, ino, key_type);

		/* if ret == 0 there are items for this type,
		 * create a range to tell us the last key of this type.
		 * otherwise, there are no items in this directory after
		 * *min_offset, and we create a range to indicate that.
		 */
		if (ret == 0) {
			struct btrfs_key tmp;
			btrfs_item_key_to_cpu(path->nodes[0], &tmp,
					      path->slots[0]);
			if (key_type == tmp.type)
				first_offset = max(min_offset, tmp.offset) + 1;
		}
		goto done;
	}

	/* go backward to find any previous key */
	ret = btrfs_previous_item(root, path, ino, key_type);
	if (ret == 0) {
		struct btrfs_key tmp;
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (key_type == tmp.type) {
			/*
			 * include the key just before our range so replay
			 * can tell where the log becomes authoritative
			 */
			first_offset = tmp.offset;
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);
			if (ret) {
				err = ret;
				goto done;
			}
		}
	}
	btrfs_release_path(path);

	/* find the first key from this transaction again */
	ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
	if (ret != 0) {
		/* the key we just logged should still exist */
		WARN_ON(1);
		goto done;
	}

	/*
	 * we have a block from this transaction, log every item in it
	 * from our directory
	 */
	while (1) {
		struct btrfs_key tmp;
		src = path->nodes[0];
		nritems = btrfs_header_nritems(src);
		for (i = path->slots[0]; i < nritems; i++) {
			btrfs_item_key_to_cpu(src, &min_key, i);

			/* walked past our directory / key type: stop */
			if (min_key.objectid != ino || min_key.type != key_type)
				goto done;
			ret = overwrite_item(trans, log, dst_path, src, i,
					     &min_key);
			if (ret) {
				err = ret;
				goto done;
			}
		}
		path->slots[0] = nritems;

		/*
		 * look ahead to the next item and see if it is also
		 * from this directory and from this transaction
		 */
		ret = btrfs_next_leaf(root, path);
		if (ret == 1) {
			/* no more leaves: authoritative to the end */
			last_offset = (u64)-1;
			goto done;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (tmp.objectid != ino || tmp.type != key_type) {
			last_offset = (u64)-1;
			goto done;
		}
		if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
			/*
			 * next leaf is from an older transaction: log one
			 * key past the range and end the authoritative
			 * range there
			 */
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);
			if (ret)
				err = ret;
			else
				last_offset = tmp.offset;
			goto done;
		}
	}
done:
	btrfs_release_path(path);
	btrfs_release_path(dst_path);

	if (err == 0) {
		*last_offset_ret = last_offset;
		/*
		 * insert the log range keys to indicate where the log
		 * is valid
		 */
		ret = insert_dir_log_key(trans, log, path, key_type,
					 ino, first_offset, last_offset);
		if (ret)
			err = ret;
	}
	return err;
}
2913
2914 /*
 * logging directories is very similar to logging inodes.  We find all the items
2916  * from the current transaction and write them to the log.
2917  *
2918  * The recovery code scans the directory in the subvolume, and if it finds a
2919  * key in the range logged that is not present in the log tree, then it means
2920  * that dir entry was unlinked during the transaction.
2921  *
2922  * In order for that scan to work, we must include one key smaller than
2923  * the smallest logged by this transaction and one key larger than the largest
2924  * key logged by this transaction.
2925  */
2926 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
2927                           struct btrfs_root *root, struct inode *inode,
2928                           struct btrfs_path *path,
2929                           struct btrfs_path *dst_path)
2930 {
2931         u64 min_key;
2932         u64 max_key;
2933         int ret;
2934         int key_type = BTRFS_DIR_ITEM_KEY;
2935
2936 again:
2937         min_key = 0;
2938         max_key = 0;
2939         while (1) {
2940                 ret = log_dir_items(trans, root, inode, path,
2941                                     dst_path, key_type, min_key,
2942                                     &max_key);
2943                 if (ret)
2944                         return ret;
2945                 if (max_key == (u64)-1)
2946                         break;
2947                 min_key = max_key + 1;
2948         }
2949
2950         if (key_type == BTRFS_DIR_ITEM_KEY) {
2951                 key_type = BTRFS_DIR_INDEX_KEY;
2952                 goto again;
2953         }
2954         return 0;
2955 }
2956
2957 /*
2958  * a helper function to drop items from the log before we relog an
2959  * inode.  max_key_type indicates the highest item type to remove.
2960  * This cannot be run for file data extents because it does not
2961  * free the extents they point to.
2962  */
static int drop_objectid_items(struct btrfs_trans_handle *trans,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  u64 objectid, int max_key_type)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int start_slot;

	/* search from just past the highest possible key for this objectid */
	key.objectid = objectid;
	key.type = max_key_type;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
		/* (objectid, max_key_type, -1ULL) can never exist as an item */
		BUG_ON(ret == 0);
		if (ret < 0)
			break;

		/* slot 0 means nothing in this tree sorts below our key */
		if (path->slots[0] == 0)
			break;

		/* step back onto the last item at or below our key */
		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);

		if (found_key.objectid != objectid)
			break;

		/*
		 * find the first slot in this leaf with our objectid so the
		 * whole run can be deleted in a single btrfs_del_items call
		 */
		found_key.offset = 0;
		found_key.type = 0;
		ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
				       &start_slot);

		ret = btrfs_del_items(trans, log, path, start_slot,
				      path->slots[0] - start_slot + 1);
		/*
		 * If start slot isn't 0 then we don't need to re-search, we've
		 * found the last guy with the objectid in this tree.
		 */
		if (ret || start_slot != 0)
			break;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);
	if (ret > 0)
		ret = 0;
	return ret;
}
3013
3014 static void fill_inode_item(struct btrfs_trans_handle *trans,
3015                             struct extent_buffer *leaf,
3016                             struct btrfs_inode_item *item,
3017                             struct inode *inode, int log_inode_only)
3018 {
3019         struct btrfs_map_token token;
3020
3021         btrfs_init_map_token(&token);
3022
3023         if (log_inode_only) {
3024                 /* set the generation to zero so the recover code
3025                  * can tell the difference between an logging
3026                  * just to say 'this inode exists' and a logging
3027                  * to say 'update this inode with these values'
3028                  */
3029                 btrfs_set_token_inode_generation(leaf, item, 0, &token);
3030                 btrfs_set_token_inode_size(leaf, item, 0, &token);
3031         } else {
3032                 btrfs_set_token_inode_generation(leaf, item,
3033                                                  BTRFS_I(inode)->generation,
3034                                                  &token);
3035                 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
3036         }
3037
3038         btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3039         btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3040         btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3041         btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3042
3043         btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item),
3044                                      inode->i_atime.tv_sec, &token);
3045         btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item),
3046                                       inode->i_atime.tv_nsec, &token);
3047
3048         btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item),
3049                                      inode->i_mtime.tv_sec, &token);
3050         btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item),
3051                                       inode->i_mtime.tv_nsec, &token);
3052
3053         btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item),
3054                                      inode->i_ctime.tv_sec, &token);
3055         btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item),
3056                                       inode->i_ctime.tv_nsec, &token);
3057
3058         btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3059                                      &token);
3060
3061         btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3062         btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3063         btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3064         btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3065         btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3066 }
3067
3068 static int log_inode_item(struct btrfs_trans_handle *trans,
3069                           struct btrfs_root *log, struct btrfs_path *path,
3070                           struct inode *inode)
3071 {
3072         struct btrfs_inode_item *inode_item;
3073         struct btrfs_key key;
3074         int ret;
3075
3076         memcpy(&key, &BTRFS_I(inode)->location, sizeof(key));
3077         ret = btrfs_insert_empty_item(trans, log, path, &key,
3078                                       sizeof(*inode_item));
3079         if (ret && ret != -EEXIST)
3080                 return ret;
3081         inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3082                                     struct btrfs_inode_item);
3083         fill_inode_item(trans, path->nodes[0], inode_item, inode, 0);
3084         btrfs_release_path(path);
3085         return 0;
3086 }
3087
3088 static noinline int copy_items(struct btrfs_trans_handle *trans,
3089                                struct inode *inode,
3090                                struct btrfs_path *dst_path,
3091                                struct extent_buffer *src,
3092                                int start_slot, int nr, int inode_only)
3093 {
3094         unsigned long src_offset;
3095         unsigned long dst_offset;
3096         struct btrfs_root *log = BTRFS_I(inode)->root->log_root;
3097         struct btrfs_file_extent_item *extent;
3098         struct btrfs_inode_item *inode_item;
3099         int ret;
3100         struct btrfs_key *ins_keys;
3101         u32 *ins_sizes;
3102         char *ins_data;
3103         int i;
3104         struct list_head ordered_sums;
3105         int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
3106
3107         INIT_LIST_HEAD(&ordered_sums);
3108
3109         ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
3110                            nr * sizeof(u32), GFP_NOFS);
3111         if (!ins_data)
3112                 return -ENOMEM;
3113
3114         ins_sizes = (u32 *)ins_data;
3115         ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
3116
3117         for (i = 0; i < nr; i++) {
3118                 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
3119                 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
3120         }
3121         ret = btrfs_insert_empty_items(trans, log, dst_path,
3122                                        ins_keys, ins_sizes, nr);
3123         if (ret) {
3124                 kfree(ins_data);
3125                 return ret;
3126         }
3127
3128         for (i = 0; i < nr; i++, dst_path->slots[0]++) {
3129                 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
3130                                                    dst_path->slots[0]);
3131
3132                 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
3133
3134                 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
3135                         inode_item = btrfs_item_ptr(dst_path->nodes[0],
3136                                                     dst_path->slots[0],
3137                                                     struct btrfs_inode_item);
3138                         fill_inode_item(trans, dst_path->nodes[0], inode_item,
3139                                         inode, inode_only == LOG_INODE_EXISTS);
3140                 } else {
3141                         copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
3142                                            src_offset, ins_sizes[i]);
3143                 }
3144
3145                 /* take a reference on file data extents so that truncates
3146                  * or deletes of this inode don't have to relog the inode
3147                  * again
3148                  */
3149                 if (btrfs_key_type(ins_keys + i) == BTRFS_EXTENT_DATA_KEY &&
3150                     !skip_csum) {
3151                         int found_type;
3152                         extent = btrfs_item_ptr(src, start_slot + i,
3153                                                 struct btrfs_file_extent_item);
3154
3155                         if (btrfs_file_extent_generation(src, extent) < trans->transid)
3156                                 continue;
3157
3158                         found_type = btrfs_file_extent_type(src, extent);
3159                         if (found_type == BTRFS_FILE_EXTENT_REG) {
3160                                 u64 ds, dl, cs, cl;
3161                                 ds = btrfs_file_extent_disk_bytenr(src,
3162                                                                 extent);
3163                                 /* ds == 0 is a hole */
3164                                 if (ds == 0)
3165                                         continue;
3166
3167                                 dl = btrfs_file_extent_disk_num_bytes(src,
3168                                                                 extent);
3169                                 cs = btrfs_file_extent_offset(src, extent);
3170                                 cl = btrfs_file_extent_num_bytes(src,
3171                                                                 extent);
3172                                 if (btrfs_file_extent_compression(src,
3173                                                                   extent)) {
3174                                         cs = 0;
3175                                         cl = dl;
3176                                 }
3177
3178                                 ret = btrfs_lookup_csums_range(
3179                                                 log->fs_info->csum_root,
3180                                                 ds + cs, ds + cs + cl - 1,
3181                                                 &ordered_sums, 0);
3182                                 BUG_ON(ret);
3183                         }
3184                 }
3185         }
3186
3187         btrfs_mark_buffer_dirty(dst_path->nodes[0]);
3188         btrfs_release_path(dst_path);
3189         kfree(ins_data);
3190
3191         /*
3192          * we have to do this after the loop above to avoid changing the
3193          * log tree while trying to change the log tree.
3194          */
3195         ret = 0;
3196         while (!list_empty(&ordered_sums)) {
3197                 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
3198                                                    struct btrfs_ordered_sum,
3199                                                    list);
3200                 if (!ret)
3201                         ret = btrfs_csum_file_blocks(trans, log, sums);
3202                 list_del(&sums->list);
3203                 kfree(sums);
3204         }
3205         return ret;
3206 }
3207
3208 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
3209 {
3210         struct extent_map *em1, *em2;
3211
3212         em1 = list_entry(a, struct extent_map, list);
3213         em2 = list_entry(b, struct extent_map, list);
3214
3215         if (em1->start < em2->start)
3216                 return -1;
3217         else if (em1->start > em2->start)
3218                 return 1;
3219         return 0;
3220 }
3221
3222 static int log_one_extent(struct btrfs_trans_handle *trans,
3223                           struct inode *inode, struct btrfs_root *root,
3224                           struct extent_map *em, struct btrfs_path *path)
3225 {
3226         struct btrfs_root *log = root->log_root;
3227         struct btrfs_file_extent_item *fi;
3228         struct extent_buffer *leaf;
3229         struct btrfs_ordered_extent *ordered;
3230         struct list_head ordered_sums;
3231         struct btrfs_map_token token;
3232         struct btrfs_key key;
3233         u64 mod_start = em->mod_start;
3234         u64 mod_len = em->mod_len;
3235         u64 csum_offset;
3236         u64 csum_len;
3237         u64 extent_offset = em->start - em->orig_start;
3238         u64 block_len;
3239         int ret;
3240         int index = log->log_transid % 2;
3241         bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
3242
3243         ret = __btrfs_drop_extents(trans, log, inode, path, em->start,
3244                                    em->start + em->len, NULL, 0);
3245         if (ret)
3246                 return ret;
3247
3248         INIT_LIST_HEAD(&ordered_sums);
3249         btrfs_init_map_token(&token);
3250         key.objectid = btrfs_ino(inode);
3251         key.type = BTRFS_EXTENT_DATA_KEY;
3252         key.offset = em->start;
3253
3254         ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*fi));
3255         if (ret)
3256                 return ret;
3257         leaf = path->nodes[0];
3258         fi = btrfs_item_ptr(leaf, path->slots[0],
3259                             struct btrfs_file_extent_item);
3260
3261         btrfs_set_token_file_extent_generation(leaf, fi, em->generation,
3262                                                &token);
3263         if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3264                 skip_csum = true;
3265                 btrfs_set_token_file_extent_type(leaf, fi,
3266                                                  BTRFS_FILE_EXTENT_PREALLOC,
3267                                                  &token);
3268         } else {
3269                 btrfs_set_token_file_extent_type(leaf, fi,
3270                                                  BTRFS_FILE_EXTENT_REG,
3271                                                  &token);
3272                 if (em->block_start == 0)
3273                         skip_csum = true;
3274         }
3275
3276         block_len = max(em->block_len, em->orig_block_len);
3277         if (em->compress_type != BTRFS_COMPRESS_NONE) {
3278                 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
3279                                                         em->block_start,
3280                                                         &token);
3281                 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
3282                                                            &token);
3283         } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
3284                 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
3285                                                         em->block_start -
3286                                                         extent_offset, &token);
3287                 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
3288                                                            &token);
3289         } else {
3290                 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
3291                 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
3292                                                            &token);
3293         }
3294
3295         btrfs_set_token_file_extent_offset(leaf, fi,
3296                                            em->start - em->orig_start,
3297                                            &token);
3298         btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
3299         btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
3300         btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
3301                                                 &token);
3302         btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
3303         btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
3304         btrfs_mark_buffer_dirty(leaf);
3305
3306         btrfs_release_path(path);
3307         if (ret) {
3308                 return ret;
3309         }
3310
3311         if (skip_csum)
3312                 return 0;
3313
3314         if (em->compress_type) {
3315                 csum_offset = 0;
3316                 csum_len = block_len;
3317         }
3318
3319         /*
3320          * First check and see if our csums are on our outstanding ordered
3321          * extents.
3322          */
3323 again:
3324         spin_lock_irq(&log->log_extents_lock[index]);
3325         list_for_each_entry(ordered, &log->logged_list[index], log_list) {
3326                 struct btrfs_ordered_sum *sum;
3327
3328                 if (!mod_len)
3329                         break;
3330
3331                 if (ordered->inode != inode)
3332                         continue;
3333
3334                 if (ordered->file_offset + ordered->len <= mod_start ||
3335                     mod_start + mod_len <= ordered->file_offset)
3336                         continue;
3337
3338                 /*
3339                  * We are going to copy all the csums on this ordered extent, so
3340                  * go ahead and adjust mod_start and mod_len in case this
3341                  * ordered extent has already been logged.
3342                  */
3343                 if (ordered->file_offset > mod_start) {
3344                         if (ordered->file_offset + ordered->len >=
3345                             mod_start + mod_len)
3346                                 mod_len = ordered->file_offset - mod_start;
3347                         /*
3348                          * If we have this case
3349                          *
3350                          * |--------- logged extent ---------|
3351                          *       |----- ordered extent ----|
3352                          *
3353                          * Just don't mess with mod_start and mod_len, we'll
3354                          * just end up logging more csums than we need and it
3355                          * will be ok.
3356                          */
3357                 } else {
3358                         if (ordered->file_offset + ordered->len <
3359                             mod_start + mod_len) {
3360                                 mod_len = (mod_start + mod_len) -
3361                                         (ordered->file_offset + ordered->len);
3362                                 mod_start = ordered->file_offset +
3363                                         ordered->len;
3364                         } else {
3365                                 mod_len = 0;
3366                         }
3367                 }
3368
3369                 /*
3370                  * To keep us from looping for the above case of an ordered
3371                  * extent that falls inside of the logged extent.
3372                  */
3373                 if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM,
3374                                      &ordered->flags))
3375                         continue;
3376                 atomic_inc(&ordered->refs);
3377                 spin_unlock_irq(&log->log_extents_lock[index]);
3378                 /*
3379                  * we've dropped the lock, we must either break or
3380                  * start over after this.
3381                  */
3382
3383                 wait_event(ordered->wait, ordered->csum_bytes_left == 0);
3384
3385                 list_for_each_entry(sum, &ordered->list, list) {
3386                         ret = btrfs_csum_file_blocks(trans, log, sum);
3387                         if (ret) {
3388                                 btrfs_put_ordered_extent(ordered);
3389                                 goto unlocked;
3390                         }
3391                 }
3392                 btrfs_put_ordered_extent(ordered);
3393                 goto again;
3394
3395         }
3396         spin_unlock_irq(&log->log_extents_lock[index]);
3397 unlocked:
3398
3399         if (!mod_len || ret)
3400                 return ret;
3401
3402         csum_offset = mod_start - em->start;
3403         csum_len = mod_len;
3404
3405         /* block start is already adjusted for the file extent offset. */
3406         ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
3407                                        em->block_start + csum_offset,
3408                                        em->block_start + csum_offset +
3409                                        csum_len - 1, &ordered_sums, 0);
3410         if (ret)
3411                 return ret;
3412
3413         while (!list_empty(&ordered_sums)) {
3414                 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
3415                                                    struct btrfs_ordered_sum,
3416                                                    list);
3417                 if (!ret)
3418                         ret = btrfs_csum_file_blocks(trans, log, sums);
3419                 list_del(&sums->list);
3420                 kfree(sums);
3421         }
3422
3423         return ret;
3424 }
3425
/*
 * Log every extent map of @inode modified after the last committed
 * transaction.
 *
 * Under tree->lock, the modified extents are moved from
 * tree->modified_extents onto a private list, pinned against eviction
 * (extra ref + EXTENT_FLAG_LOGGING), sorted by file offset and then
 * handed to log_one_extent() one at a time, dropping the lock around
 * each call.  More than 32768 pending extents aborts with -EFBIG;
 * presumably the caller then falls back to a full transaction commit.
 *
 * Returns 0 on success or a negative errno.
 */
static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct inode *inode,
				     struct btrfs_path *path)
{
	struct extent_map *em, *n;
	struct list_head extents;
	struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
	u64 test_gen;
	int ret = 0;
	int num = 0;

	INIT_LIST_HEAD(&extents);

	write_lock(&tree->lock);
	test_gen = root->fs_info->last_trans_committed;

	list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
		list_del_init(&em->list);

		/*
		 * Just an arbitrary number, this can be really CPU intensive
		 * once we start getting a lot of extents, and really once we
		 * have a bunch of extents we just want to commit since it will
		 * be faster.
		 */
		if (++num > 32768) {
			/* drop all remaining tracking and bail out */
			list_del_init(&tree->modified_extents);
			ret = -EFBIG;
			goto process;
		}

		/* extents from already-committed transactions need no log */
		if (em->generation <= test_gen)
			continue;
		/* Need a ref to keep it from getting evicted from cache */
		atomic_inc(&em->refs);
		set_bit(EXTENT_FLAG_LOGGING, &em->flags);
		list_add_tail(&em->list, &extents);
		num++;
	}

	/* process the extents in file-offset order */
	list_sort(NULL, &extents, extent_cmp);

process:
	while (!list_empty(&extents)) {
		em = list_entry(extents.next, struct extent_map, list);

		list_del_init(&em->list);

		/*
		 * If we had an error we just need to delete everybody from our
		 * private list.
		 */
		if (ret) {
			clear_em_logging(tree, em);
			free_extent_map(em);
			continue;
		}

		/* log_one_extent() sleeps; must not hold tree->lock across it */
		write_unlock(&tree->lock);

		ret = log_one_extent(trans, inode, root, em, path);
		write_lock(&tree->lock);
		clear_em_logging(tree, em);
		free_extent_map(em);
	}
	WARN_ON(!list_empty(&extents));
	write_unlock(&tree->lock);

	btrfs_release_path(path);
	return ret;
}
3498
/* log a single inode in the tree log.
 * At least one parent directory for this inode must exist in the tree
 * or be logged already.
 *
 * Any items from this inode changed by the current transaction are copied
 * to the log tree.  An extra reference is taken on any extents in this
 * file, allowing us to avoid a whole pile of corner cases around logging
 * blocks that have been removed from the tree.
 *
 * See LOG_INODE_ALL and related defines for a description of what inode_only
 * does.
 *
 * This handles both files and directories.
 *
 * Returns 0 on success or a negative errno; on error any extents pinned
 * for this log transaction are released before unlocking.
 */
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, struct inode *inode,
			     int inode_only)
{
	struct btrfs_path *path;
	struct btrfs_path *dst_path;	/* destination (log tree) path */
	struct btrfs_key min_key;
	struct btrfs_key max_key;
	struct btrfs_root *log = root->log_root;
	struct extent_buffer *src = NULL;
	int err = 0;
	int ret;
	int nritems;
	int ins_start_slot = 0;		/* start of the current batch of slots */
	int ins_nr;			/* number of contiguous slots batched */
	bool fast_search = false;	/* log extents from the em cache instead */
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	dst_path = btrfs_alloc_path();
	if (!dst_path) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	/* scan everything belonging to this inode, starting at its item */
	min_key.objectid = ino;
	min_key.type = BTRFS_INODE_ITEM_KEY;
	min_key.offset = 0;

	max_key.objectid = ino;


	/* today the code can only do partial logging of directories */
	if (S_ISDIR(inode->i_mode) ||
	    (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
		       &BTRFS_I(inode)->runtime_flags) &&
	     inode_only == LOG_INODE_EXISTS))
		max_key.type = BTRFS_XATTR_ITEM_KEY;
	else
		max_key.type = (u8)-1;
	max_key.offset = (u64)-1;

	/* Only run delayed items if we are a dir or a new file */
	if (S_ISDIR(inode->i_mode) ||
	    BTRFS_I(inode)->generation > root->fs_info->last_trans_committed) {
		ret = btrfs_commit_inode_delayed_items(trans, inode);
		if (ret) {
			btrfs_free_path(path);
			btrfs_free_path(dst_path);
			return ret;
		}
	}

	mutex_lock(&BTRFS_I(inode)->log_mutex);

	/* pin this inode's ordered extents on the current log transaction */
	btrfs_get_logged_extents(log, inode);

	/*
	 * a brute force approach to making sure we get the most uptodate
	 * copies of everything.
	 */
	if (S_ISDIR(inode->i_mode)) {
		int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;

		if (inode_only == LOG_INODE_EXISTS)
			max_key_type = BTRFS_XATTR_ITEM_KEY;
		/* drop previously-logged items before re-copying them */
		ret = drop_objectid_items(trans, log, path, ino, max_key_type);
	} else {
		if (test_and_clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
				       &BTRFS_I(inode)->runtime_flags)) {
			/* full sync: wipe this inode from the log entirely */
			clear_bit(BTRFS_INODE_COPY_EVERYTHING,
				  &BTRFS_I(inode)->runtime_flags);
			ret = btrfs_truncate_inode_items(trans, log,
							 inode, 0, 0);
		} else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
					      &BTRFS_I(inode)->runtime_flags)) {
			/* copy non-extent items; extents come from the cache */
			if (inode_only == LOG_INODE_ALL)
				fast_search = true;
			max_key.type = BTRFS_XATTR_ITEM_KEY;
			ret = drop_objectid_items(trans, log, path, ino,
						  max_key.type);
		} else {
			/* only the inode item changed; skip the tree walk */
			if (inode_only == LOG_INODE_ALL)
				fast_search = true;
			ret = log_inode_item(trans, log, dst_path, inode);
			if (ret) {
				err = ret;
				goto out_unlock;
			}
			goto log_extents;
		}

	}
	if (ret) {
		err = ret;
		goto out_unlock;
	}
	path->keep_locks = 1;

	/*
	 * Walk all items of this inode changed in the current transaction,
	 * batching runs of adjacent slots into single copy_items() calls.
	 */
	while (1) {
		ins_nr = 0;
		ret = btrfs_search_forward(root, &min_key, &max_key,
					   path, trans->transid);
		if (ret != 0)
			break;
again:
		/* note, ins_nr might be > 0 here, cleanup outside the loop */
		if (min_key.objectid != ino)
			break;
		if (min_key.type > max_key.type)
			break;

		src = path->nodes[0];
		if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
			/* slot is contiguous with the batch: extend it */
			ins_nr++;
			goto next_slot;
		} else if (!ins_nr) {
			/* start a new batch at this slot */
			ins_start_slot = path->slots[0];
			ins_nr = 1;
			goto next_slot;
		}

		/* non-contiguous slot: flush the pending batch first */
		ret = copy_items(trans, inode, dst_path, src, ins_start_slot,
				 ins_nr, inode_only);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
		ins_nr = 1;
		ins_start_slot = path->slots[0];
next_slot:

		nritems = btrfs_header_nritems(path->nodes[0]);
		path->slots[0]++;
		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(path->nodes[0], &min_key,
					      path->slots[0]);
			goto again;
		}
		/* end of leaf: flush the batch before releasing the path */
		if (ins_nr) {
			ret = copy_items(trans, inode, dst_path, src,
					 ins_start_slot,
					 ins_nr, inode_only);
			if (ret) {
				err = ret;
				goto out_unlock;
			}
			ins_nr = 0;
		}
		btrfs_release_path(path);

		/* advance min_key past the last item we saw, or finish */
		if (min_key.offset < (u64)-1)
			min_key.offset++;
		else if (min_key.type < (u8)-1)
			min_key.type++;
		else if (min_key.objectid < (u64)-1)
			min_key.objectid++;
		else
			break;
	}
	/* flush any batch left over from the final leaf */
	if (ins_nr) {
		ret = copy_items(trans, inode, dst_path, src, ins_start_slot,
				 ins_nr, inode_only);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
		ins_nr = 0;
	}

log_extents:
	if (fast_search) {
		btrfs_release_path(dst_path);
		ret = btrfs_log_changed_extents(trans, root, inode, dst_path);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	} else {
		/* not logging from the cache: drop the modified-extent list */
		struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
		struct extent_map *em, *n;

		write_lock(&tree->lock);
		list_for_each_entry_safe(em, n, &tree->modified_extents, list)
			list_del_init(&em->list);
		write_unlock(&tree->lock);
	}

	if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
		btrfs_release_path(path);
		btrfs_release_path(dst_path);
		ret = log_directory_changes(trans, root, inode, path, dst_path);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	}
	/* record that this inode is fully logged in this transaction */
	BTRFS_I(inode)->logged_trans = trans->transid;
	BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans;
out_unlock:
	if (err)
		btrfs_free_logged_extents(log, log->log_transid);
	mutex_unlock(&BTRFS_I(inode)->log_mutex);

	btrfs_free_path(path);
	btrfs_free_path(dst_path);
	return err;
}
3723
/*
 * follow the dentry parent pointers up the chain and see if any
 * of the directories in it require a full commit before they can
 * be logged.  Returns zero if nothing special needs to be done or 1 if
 * a full commit is required.
 *
 * When 1 is returned, last_trans_log_full_commit has already been set
 * to the current transid so any log commit becomes a full commit.
 */
static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
					       struct inode *inode,
					       struct dentry *parent,
					       struct super_block *sb,
					       u64 last_committed)
{
	int ret = 0;
	struct btrfs_root *root;
	struct dentry *old_parent = NULL;

	/*
	 * for regular files, if its inode is already on disk, we don't
	 * have to worry about the parents at all.  This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->i_mode) &&
	    BTRFS_I(inode)->generation <= last_committed &&
	    BTRFS_I(inode)->last_unlink_trans <= last_committed)
			goto out;

	/* for non-directories, start the walk from the parent directory */
	if (!S_ISDIR(inode->i_mode)) {
		if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
			goto out;
		inode = parent->d_inode;
	}

	while (1) {
		/*
		 * mark each ancestor as logged before checking it;
		 * NOTE(review): the smp_mb() presumably orders this store
		 * against concurrent readers of logged_trans — confirm
		 * against the matching barrier on the reader side.
		 */
		BTRFS_I(inode)->logged_trans = trans->transid;
		smp_mb();

		if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
			root = BTRFS_I(inode)->root;

			/*
			 * make sure any commits to the log are forced
			 * to be full commits
			 */
			root->fs_info->last_trans_log_full_commit =
				trans->transid;
			ret = 1;
			break;
		}

		/* stop at a missing parent or a filesystem boundary */
		if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
			break;

		if (IS_ROOT(parent))
			break;

		/* climb one level; drop the ref taken on the previous step */
		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
		inode = parent->d_inode;

	}
	dput(old_parent);
out:
	return ret;
}
3790
3791 /*
3792  * helper function around btrfs_log_inode to make sure newly created
3793  * parent directories also end up in the log.  A minimal inode and backref
3794  * only logging is done of any parent directories that are older than
3795  * the last committed transaction
3796  */
3797 int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
3798                     struct btrfs_root *root, struct inode *inode,
3799                     struct dentry *parent, int exists_only)
3800 {
3801         int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
3802         struct super_block *sb;
3803         struct dentry *old_parent = NULL;
3804         int ret = 0;
3805         u64 last_committed = root->fs_info->last_trans_committed;
3806
3807         sb = inode->i_sb;
3808
3809         if (btrfs_test_opt(root, NOTREELOG)) {
3810                 ret = 1;
3811                 goto end_no_trans;
3812         }
3813
3814         if (root->fs_info->last_trans_log_full_commit >
3815             root->fs_info->last_trans_committed) {
3816                 ret = 1;
3817                 goto end_no_trans;
3818         }
3819
3820         if (root != BTRFS_I(inode)->root ||
3821             btrfs_root_refs(&root->root_item) == 0) {
3822                 ret = 1;
3823                 goto end_no_trans;
3824         }
3825
3826         ret = check_parent_dirs_for_sync(trans, inode, parent,
3827                                          sb, last_committed);
3828         if (ret)
3829                 goto end_no_trans;
3830
3831         if (btrfs_inode_in_log(inode, trans->transid)) {
3832                 ret = BTRFS_NO_LOG_SYNC;
3833                 goto end_no_trans;
3834         }
3835
3836         ret = start_log_trans(trans, root);
3837         if (ret)
3838                 goto end_trans;
3839
3840         ret = btrfs_log_inode(trans, root, inode, inode_only);
3841         if (ret)
3842                 goto end_trans;
3843
3844         /*
3845          * for regular files, if its inode is already on disk, we don't
3846          * have to worry about the parents at all.  This is because
3847          * we can use the last_unlink_trans field to record renames
3848          * and other fun in this file.
3849          */
3850         if (S_ISREG(inode->i_mode) &&
3851             BTRFS_I(inode)->generation <= last_committed &&
3852             BTRFS_I(inode)->last_unlink_trans <= last_committed) {
3853                 ret = 0;
3854                 goto end_trans;
3855         }
3856
3857         inode_only = LOG_INODE_EXISTS;
3858         while (1) {
3859                 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
3860                         break;
3861
3862                 inode = parent->d_inode;
3863                 if (root != BTRFS_I(inode)->root)
3864                         break;
3865
3866                 if (BTRFS_I(inode)->generation >
3867                     root->fs_info->last_trans_committed) {
3868                         ret = btrfs_log_inode(trans, root, inode, inode_only);
3869                         if (ret)
3870                                 goto end_trans;
3871                 }
3872                 if (IS_ROOT(parent))
3873                         break;
3874
3875                 parent = dget_parent(parent);
3876                 dput(old_parent);
3877                 old_parent = parent;
3878         }
3879         ret = 0;
3880 end_trans:
3881         dput(old_parent);
3882         if (ret < 0) {
3883                 root->fs_info->last_trans_log_full_commit = trans->transid;
3884                 ret = 1;
3885         }
3886         btrfs_end_log_trans(root);
3887 end_no_trans:
3888         return ret;
3889 }
3890
3891 /*
3892  * it is not safe to log dentry if the chunk root has added new
3893  * chunks.  This returns 0 if the dentry was logged, and 1 otherwise.
3894  * If this returns 1, you must commit the transaction to safely get your
3895  * data on disk.
3896  */
3897 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
3898                           struct btrfs_root *root, struct dentry *dentry)
3899 {
3900         struct dentry *parent = dget_parent(dentry);
3901         int ret;
3902
3903         ret = btrfs_log_inode_parent(trans, root, dentry->d_inode, parent, 0);
3904         dput(parent);
3905
3906         return ret;
3907 }
3908
3909 /*
3910  * should be called during mount to recover any replay any log trees
3911  * from the FS
3912  */
3913 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
3914 {
3915         int ret;
3916         struct btrfs_path *path;
3917         struct btrfs_trans_handle *trans;
3918         struct btrfs_key key;
3919         struct btrfs_key found_key;
3920         struct btrfs_key tmp_key;
3921         struct btrfs_root *log;
3922         struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
3923         struct walk_control wc = {
3924                 .process_func = process_one_buffer,
3925                 .stage = 0,
3926         };
3927
3928         path = btrfs_alloc_path();
3929         if (!path)
3930                 return -ENOMEM;
3931
3932         fs_info->log_root_recovering = 1;
3933
3934         trans = btrfs_start_transaction(fs_info->tree_root, 0);
3935         if (IS_ERR(trans)) {
3936                 ret = PTR_ERR(trans);
3937                 goto error;
3938         }
3939
3940         wc.trans = trans;
3941         wc.pin = 1;
3942
3943         ret = walk_log_tree(trans, log_root_tree, &wc);
3944         if (ret) {
3945                 btrfs_error(fs_info, ret, "Failed to pin buffers while "
3946                             "recovering log root tree.");
3947                 goto error;
3948         }
3949
3950 again:
3951         key.objectid = BTRFS_TREE_LOG_OBJECTID;
3952         key.offset = (u64)-1;
3953         btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
3954
3955         while (1) {
3956                 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
3957
3958                 if (ret < 0) {
3959                         btrfs_error(fs_info, ret,
3960                                     "Couldn't find tree log root.");
3961                         goto error;
3962                 }
3963                 if (ret > 0) {
3964                         if (path->slots[0] == 0)
3965                                 break;
3966                         path->slots[0]--;
3967                 }
3968                 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3969                                       path->slots[0]);
3970                 btrfs_release_path(path);
3971                 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
3972                         break;
3973
3974                 log = btrfs_read_fs_root_no_radix(log_root_tree,
3975                                                   &found_key);
3976                 if (IS_ERR(log)) {
3977                         ret = PTR_ERR(log);
3978                         btrfs_error(fs_info, ret,
3979                                     "Couldn't read tree log root.");
3980                         goto error;
3981                 }
3982
3983                 tmp_key.objectid = found_key.offset;
3984                 tmp_key.type = BTRFS_ROOT_ITEM_KEY;
3985                 tmp_key.offset = (u64)-1;
3986
3987                 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
3988                 if (IS_ERR(wc.replay_dest)) {
3989                         ret = PTR_ERR(wc.replay_dest);
3990                         free_extent_buffer(log->node);
3991                         free_extent_buffer(log->commit_root);
3992                         kfree(log);
3993                         btrfs_error(fs_info, ret, "Couldn't read target root "
3994                                     "for tree log recovery.");
3995                         goto error;
3996                 }
3997
3998                 wc.replay_dest->log_root = log;
3999                 btrfs_record_root_in_trans(trans, wc.replay_dest);
4000                 ret = walk_log_tree(trans, log, &wc);
4001
4002                 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
4003                         ret = fixup_inode_link_counts(trans, wc.replay_dest,
4004                                                       path);
4005                 }
4006
4007                 key.offset = found_key.offset - 1;
4008                 wc.replay_dest->log_root = NULL;
4009                 free_extent_buffer(log->node);
4010                 free_extent_buffer(log->commit_root);
4011                 kfree(log);
4012
4013                 if (ret)
4014                         goto error;
4015
4016                 if (found_key.offset == 0)
4017                         break;
4018         }
4019         btrfs_release_path(path);
4020
4021         /* step one is to pin it all, step two is to replay just inodes */
4022         if (wc.pin) {
4023                 wc.pin = 0;
4024                 wc.process_func = replay_one_buffer;
4025                 wc.stage = LOG_WALK_REPLAY_INODES;
4026                 goto again;
4027         }
4028         /* step three is to replay everything */
4029         if (wc.stage < LOG_WALK_REPLAY_ALL) {
4030                 wc.stage++;
4031                 goto again;
4032         }
4033
4034         btrfs_free_path(path);
4035
4036         /* step 4: commit the transaction, which also unpins the blocks */
4037         ret = btrfs_commit_transaction(trans, fs_info->tree_root);
4038         if (ret)
4039                 return ret;
4040
4041         free_extent_buffer(log_root_tree->node);
4042         log_root_tree->log_root = NULL;
4043         fs_info->log_root_recovering = 0;
4044         kfree(log_root_tree);
4045
4046         return 0;
4047 error:
4048         if (wc.trans)
4049                 btrfs_end_transaction(wc.trans, fs_info->tree_root);
4050         btrfs_free_path(path);
4051         return ret;
4052 }
4053
4054 /*
4055  * there are some corner cases where we want to force a full
4056  * commit instead of allowing a directory to be logged.
4057  *
4058  * They revolve around files there were unlinked from the directory, and
4059  * this function updates the parent directory so that a full commit is
4060  * properly done if it is fsync'd later after the unlinks are done.
4061  */
4062 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
4063                              struct inode *dir, struct inode *inode,
4064                              int for_rename)
4065 {
4066         /*
4067          * when we're logging a file, if it hasn't been renamed
4068          * or unlinked, and its inode is fully committed on disk,
4069          * we don't have to worry about walking up the directory chain
4070          * to log its parents.
4071          *
4072          * So, we use the last_unlink_trans field to put this transid
4073          * into the file.  When the file is logged we check it and
4074          * don't log the parents if the file is fully on disk.
4075          */
4076         if (S_ISREG(inode->i_mode))
4077                 BTRFS_I(inode)->last_unlink_trans = trans->transid;
4078
4079         /*
4080          * if this directory was already logged any new
4081          * names for this file/dir will get recorded
4082          */
4083         smp_mb();
4084         if (BTRFS_I(dir)->logged_trans == trans->transid)
4085                 return;
4086
4087         /*
4088          * if the inode we're about to unlink was logged,
4089          * the log will be properly updated for any new names
4090          */
4091         if (BTRFS_I(inode)->logged_trans == trans->transid)
4092                 return;
4093
4094         /*
4095          * when renaming files across directories, if the directory
4096          * there we're unlinking from gets fsync'd later on, there's
4097          * no way to find the destination directory later and fsync it
4098          * properly.  So, we have to be conservative and force commits
4099          * so the new name gets discovered.
4100          */
4101         if (for_rename)
4102                 goto record;
4103
4104         /* we can safely do the unlink without any special recording */
4105         return;
4106
4107 record:
4108         BTRFS_I(dir)->last_unlink_trans = trans->transid;
4109 }
4110
4111 /*
4112  * Call this after adding a new name for a file and it will properly
4113  * update the log to reflect the new name.
4114  *
4115  * It will return zero if all goes well, and it will return 1 if a
4116  * full transaction commit is required.
4117  */
4118 int btrfs_log_new_name(struct btrfs_trans_handle *trans,
4119                         struct inode *inode, struct inode *old_dir,
4120                         struct dentry *parent)
4121 {
4122         struct btrfs_root * root = BTRFS_I(inode)->root;
4123
4124         /*
4125          * this will force the logging code to walk the dentry chain
4126          * up for the file
4127          */
4128         if (S_ISREG(inode->i_mode))
4129                 BTRFS_I(inode)->last_unlink_trans = trans->transid;
4130
4131         /*
4132          * if this inode hasn't been logged and directory we're renaming it
4133          * from hasn't been logged, we don't need to log it
4134          */
4135         if (BTRFS_I(inode)->logged_trans <=
4136             root->fs_info->last_trans_committed &&
4137             (!old_dir || BTRFS_I(old_dir)->logged_trans <=
4138                     root->fs_info->last_trans_committed))
4139                 return 0;
4140
4141         return btrfs_log_inode_parent(trans, root, inode, parent, 1);
4142 }
4143