/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"

#include <trace/events/ext4.h>

/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT    0x1  /* safe to zeroout if split fails \
                                        due to ENOSPC */
#define EXT4_EXT_MARK_UNINIT1   0x2  /* mark first half uninitialized */
#define EXT4_EXT_MARK_UNINIT2   0x4  /* mark second half uninitialized */

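/*
 * Illustrative sketch (not original code): when splitting an
 * uninitialized extent, a caller would typically combine the flags
 * above, e.g.:
 *
 *	int split_flag = EXT4_EXT_MAY_ZEROOUT |
 *			 EXT4_EXT_MARK_UNINIT1 |
 *			 EXT4_EXT_MARK_UNINIT2;
 *
 * so that both halves stay uninitialized and the split may fall back
 * to zeroing out the extent if it fails with ENOSPC.
 */
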
static __le32 ext4_extent_block_csum(struct inode *inode,
                                     struct ext4_extent_header *eh)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        __u32 csum;

        csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
                           EXT4_EXTENT_TAIL_OFFSET(eh));
        return cpu_to_le32(csum);
}

static int ext4_extent_block_csum_verify(struct inode *inode,
                                         struct ext4_extent_header *eh)
{
        struct ext4_extent_tail *et;

        if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
                EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
                return 1;

        et = find_ext4_extent_tail(eh);
        if (et->et_checksum != ext4_extent_block_csum(inode, eh))
                return 0;
        return 1;
}

static void ext4_extent_block_csum_set(struct inode *inode,
                                       struct ext4_extent_header *eh)
{
        struct ext4_extent_tail *et;

        if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
                EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
                return;

        et = find_ext4_extent_tail(eh);
        et->et_checksum = ext4_extent_block_csum(inode, eh);
}

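/*
 * Illustrative sketch (an assumption about typical usage, not original
 * code): the two helpers above bracket block I/O -- verify on read,
 * (re)compute on write:
 *
 *	eh = ext_block_hdr(bh);
 *	if (!ext4_extent_block_csum_verify(inode, eh))
 *		return -EIO;			/# stale/corrupt node #/
 *	... modify eh ...
 *	ext4_extent_block_csum_set(inode, eh);	/# before dirtying bh #/
 *
 * __ext4_ext_dirty() below follows this write-side pattern.
 */
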
static int ext4_split_extent(handle_t *handle,
                                struct inode *inode,
                                struct ext4_ext_path *path,
                                struct ext4_map_blocks *map,
                                int split_flag,
                                int flags);

static int ext4_split_extent_at(handle_t *handle,
                             struct inode *inode,
                             struct ext4_ext_path *path,
                             ext4_lblk_t split,
                             int split_flag,
                             int flags);

static int ext4_ext_truncate_extend_restart(handle_t *handle,
                                            struct inode *inode,
                                            int needed)
{
        int err;

        if (!ext4_handle_valid(handle))
                return 0;
        if (handle->h_buffer_credits > needed)
                return 0;
        err = ext4_journal_extend(handle, needed);
        if (err <= 0)
                return err;
        err = ext4_truncate_restart_trans(handle, inode, needed);
        if (err == 0)
                err = -EAGAIN;

        return err;
}

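/*
 * Illustrative caller pattern (a sketch, not original code): since the
 * helper above returns -EAGAIN after restarting the transaction, a
 * truncate loop is expected to re-read the tree and retry:
 *
 *	err = ext4_ext_truncate_extend_restart(handle, inode, needed);
 *	if (err == -EAGAIN)
 *		goto again;	/# path may be stale after restart #/
 *	else if (err)
 *		goto out;
 */
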
/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path)
{
        if (path->p_bh) {
                /* path points to block */
                return ext4_journal_get_write_access(handle, path->p_bh);
        }
        /* path points to leaf/index in inode body */
        /* we use in-core data, no need to protect them */
        return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
#define ext4_ext_dirty(handle, inode, path) \
                __ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
static int __ext4_ext_dirty(const char *where, unsigned int line,
                            handle_t *handle, struct inode *inode,
                            struct ext4_ext_path *path)
{
        int err;
        if (path->p_bh) {
                ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
                /* path points to block */
                err = __ext4_handle_dirty_metadata(where, line, handle,
                                                   inode, path->p_bh);
        } else {
                /* path points to leaf/index in inode body */
                err = ext4_mark_inode_dirty(handle, inode);
        }
        return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
                              struct ext4_ext_path *path,
                              ext4_lblk_t block)
{
        if (path) {
                int depth = path->p_depth;
                struct ext4_extent *ex;

                /*
                 * Try to predict block placement assuming that we are
                 * filling in a file which will eventually be
                 * non-sparse --- i.e., in the case of libbfd writing
                 * ELF object sections out-of-order but in a way that
                 * eventually results in a contiguous object or
                 * executable file, or some database extending a table
                 * space file.  However, this is actually somewhat
                 * non-ideal if we are writing a sparse file such as
                 * qemu or KVM writing a raw image file that is going
                 * to stay fairly sparse, since it will end up
                 * fragmenting the file system's free space.  Maybe we
                 * should have some heuristics or some way to allow
                 * userspace to pass a hint to the file system,
                 * especially if the latter case turns out to be
                 * common.
                 */
                ex = path[depth].p_ext;
                if (ex) {
                        ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
                        ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

                        if (block > ext_block)
                                return ext_pblk + (block - ext_block);
                        else
                                return ext_pblk - (ext_block - block);
                }

                /* it looks like the index is empty;
                 * try to find the starting block from the index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        return ext4_inode_to_goal_block(inode);
}

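/*
 * Worked example (illustrative numbers): if the path ends in an extent
 * ex with ee_block == 100 mapped to physical block 5000, the goal for
 * logical block 108 is 5000 + (108 - 100) = 5008, and for logical
 * block 96 it is 5000 - (100 - 96) = 4996, i.e. the goal keeps the new
 * allocation physically contiguous with the existing extent.
 */
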
/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
                        struct ext4_ext_path *path,
                        struct ext4_extent *ex, int *err, unsigned int flags)
{
        ext4_fsblk_t goal, newblock;

        goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
        newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
                                        NULL, err);
        return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
        int size;

        size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                        / sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 6)
                size = 6;
#endif
        return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
        int size;

        size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                        / sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 5)
                size = 5;
#endif
        return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
        int size;

        size = sizeof(EXT4_I(inode)->i_data);
        size -= sizeof(struct ext4_extent_header);
        size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 3)
                size = 3;
#endif
        return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
        int size;

        size = sizeof(EXT4_I(inode)->i_data);
        size -= sizeof(struct ext4_extent_header);
        size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 4)
                size = 4;
#endif
        return size;
}

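/*
 * Worked example (illustrative, assuming a 4KiB block size and the
 * 12-byte on-disk sizes of ext4_extent_header, ext4_extent and
 * ext4_extent_idx): an index or leaf block holds (4096 - 12) / 12 = 340
 * entries, while the 60-byte i_data root holds (60 - 12) / 12 = 4
 * entries.  AGGRESSIVE_TEST shrinks these limits so the tree-growth
 * paths are exercised early.
 */
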
/*
 * Calculate the number of metadata blocks needed
 * to allocate @lblock.
 * Worst case is one block per extent.
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        int idxs;

        idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                / sizeof(struct ext4_extent_idx));

        /*
         * If the new delayed allocation block is contiguous with the
         * previous da block, it can share index blocks with the
         * previous block, so we only need to allocate a new index
         * block every idxs leaf blocks.  At idxs**2 blocks, we need
         * an additional index block, and at idxs**3 blocks, yet
         * another index block.
         */
        if (ei->i_da_metadata_calc_len &&
            ei->i_da_metadata_calc_last_lblock+1 == lblock) {
                int num = 0;

                if ((ei->i_da_metadata_calc_len % idxs) == 0)
                        num++;
                if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
                        num++;
                if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
                        num++;
                        ei->i_da_metadata_calc_len = 0;
                } else
                        ei->i_da_metadata_calc_len++;
                ei->i_da_metadata_calc_last_lblock++;
                return num;
        }

        /*
         * In the worst case we need a new set of index blocks at
         * every level of the inode's extent tree.
         */
        ei->i_da_metadata_calc_len = 1;
        ei->i_da_metadata_calc_last_lblock = lblock;
        return ext_depth(inode) + 1;
}

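/*
 * Worked example (illustrative): with idxs == 340 (4KiB blocks), a run
 * of contiguous delayed-allocation blocks charges one new leaf block at
 * every 340th block, one extra index block at every (340*340)th block,
 * and so on; the first block of a discontiguous run charges the full
 * worst case of ext_depth(inode) + 1 metadata blocks.
 */
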
static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
        int max;

        if (depth == ext_depth(inode)) {
                if (depth == 0)
                        max = ext4_ext_space_root(inode, 1);
                else
                        max = ext4_ext_space_root_idx(inode, 1);
        } else {
                if (depth == 0)
                        max = ext4_ext_space_block(inode, 1);
                else
                        max = ext4_ext_space_block_idx(inode, 1);
        }

        return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
        ext4_fsblk_t block = ext4_ext_pblock(ext);
        int len = ext4_ext_get_actual_len(ext);

        if (len == 0)
                return 0;
        return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
                                struct ext4_extent_idx *ext_idx)
{
        ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

        return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
                                struct ext4_extent_header *eh,
                                int depth)
{
        unsigned short entries;
        if (eh->eh_entries == 0)
                return 1;

        entries = le16_to_cpu(eh->eh_entries);

        if (depth == 0) {
                /* leaf entries */
                struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
                while (entries) {
                        if (!ext4_valid_extent(inode, ext))
                                return 0;
                        ext++;
                        entries--;
                }
        } else {
                struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
                while (entries) {
                        if (!ext4_valid_extent_idx(inode, ext_idx))
                                return 0;
                        ext_idx++;
                        entries--;
                }
        }
        return 1;
}

static int __ext4_ext_check(const char *function, unsigned int line,
                            struct inode *inode, struct ext4_extent_header *eh,
                            int depth)
{
        const char *error_msg;
        int max = 0;

        if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
                error_msg = "invalid magic";
                goto corrupted;
        }
        if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
                error_msg = "unexpected eh_depth";
                goto corrupted;
        }
        if (unlikely(eh->eh_max == 0)) {
                error_msg = "invalid eh_max";
                goto corrupted;
        }
        max = ext4_ext_max_entries(inode, depth);
        if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
                error_msg = "too large eh_max";
                goto corrupted;
        }
        if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
                error_msg = "invalid eh_entries";
                goto corrupted;
        }
        if (!ext4_valid_extent_entries(inode, eh, depth)) {
                error_msg = "invalid extent entries";
                goto corrupted;
        }
        /* Verify checksum on non-root extent tree nodes */
        if (ext_depth(inode) != depth &&
            !ext4_extent_block_csum_verify(inode, eh)) {
                error_msg = "extent tree corrupted";
                goto corrupted;
        }
        return 0;

corrupted:
        ext4_error_inode(inode, function, line, 0,
                        "bad header/extent: %s - magic %x, "
                        "entries %u, max %u(%u), depth %u(%u)",
                        error_msg, le16_to_cpu(eh->eh_magic),
                        le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
                        max, le16_to_cpu(eh->eh_depth), depth);

        return -EIO;
}

#define ext4_ext_check(inode, eh, depth)        \
        __ext4_ext_check(__func__, __LINE__, inode, eh, depth)

int ext4_ext_check_inode(struct inode *inode)
{
        return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
}

static int __ext4_ext_check_block(const char *function, unsigned int line,
                                  struct inode *inode,
                                  struct ext4_extent_header *eh,
                                  int depth,
                                  struct buffer_head *bh)
{
        int ret;

        if (buffer_verified(bh))
                return 0;
        ret = ext4_ext_check(inode, eh, depth);
        if (ret)
                return ret;
        set_buffer_verified(bh);
        return ret;
}

#define ext4_ext_check_block(inode, eh, depth, bh)      \
        __ext4_ext_check_block(__func__, __LINE__, inode, eh, depth, bh)

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
        int k, l = path->p_depth;

        ext_debug("path:");
        for (k = 0; k <= l; k++, path++) {
                if (path->p_idx) {
                        ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
                                  ext4_idx_pblock(path->p_idx));
                } else if (path->p_ext) {
                        ext_debug("  %d:[%d]%d:%llu ",
                                  le32_to_cpu(path->p_ext->ee_block),
                                  ext4_ext_is_uninitialized(path->p_ext),
                                  ext4_ext_get_actual_len(path->p_ext),
                                  ext4_ext_pblock(path->p_ext));
                } else
                        ext_debug("  []");
        }
        ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
        int depth = ext_depth(inode);
        struct ext4_extent_header *eh;
        struct ext4_extent *ex;
        int i;

        if (!path)
                return;

        eh = path[depth].p_hdr;
        ex = EXT_FIRST_EXTENT(eh);

        ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

        for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
                ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
                          ext4_ext_is_uninitialized(ex),
                          ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
        }
        ext_debug("\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
                        ext4_fsblk_t newblock, int level)
{
        int depth = ext_depth(inode);
        struct ext4_extent *ex;

        if (depth != level) {
                struct ext4_extent_idx *idx;
                idx = path[level].p_idx;
                while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
                        ext_debug("%d: move %d:%llu in new index %llu\n", level,
                                        le32_to_cpu(idx->ei_block),
                                        ext4_idx_pblock(idx),
                                        newblock);
                        idx++;
                }

                return;
        }

        ex = path[depth].p_ext;
        while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
                ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
                                le32_to_cpu(ex->ee_block),
                                ext4_ext_pblock(ex),
                                ext4_ext_is_uninitialized(ex),
                                ext4_ext_get_actual_len(ex),
                                newblock);
                ex++;
        }
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif

void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
        int depth = path->p_depth;
        int i;

        for (i = 0; i <= depth; i++, path++)
                if (path->p_bh) {
                        brelse(path->p_bh);
                        path->p_bh = NULL;
                }
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
                        struct ext4_ext_path *path, ext4_lblk_t block)
{
        struct ext4_extent_header *eh = path->p_hdr;
        struct ext4_extent_idx *r, *l, *m;


        ext_debug("binsearch for %u(idx):  ", block);

        l = EXT_FIRST_INDEX(eh) + 1;
        r = EXT_LAST_INDEX(eh);
        while (l <= r) {
                m = l + (r - l) / 2;
                if (block < le32_to_cpu(m->ei_block))
                        r = m - 1;
                else
                        l = m + 1;
                ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
                                m, le32_to_cpu(m->ei_block),
                                r, le32_to_cpu(r->ei_block));
        }

        path->p_idx = l - 1;
        ext_debug("  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
                  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
        {
                struct ext4_extent_idx *chix, *ix;
                int k;

                chix = ix = EXT_FIRST_INDEX(eh);
                for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
                        if (k != 0 &&
                            le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
                                printk(KERN_DEBUG "k=%d, ix=0x%p, "
                                       "first=0x%p\n", k,
                                       ix, EXT_FIRST_INDEX(eh));
                                printk(KERN_DEBUG "%u <= %u\n",
                                       le32_to_cpu(ix->ei_block),
                                       le32_to_cpu(ix[-1].ei_block));
                        }
                        BUG_ON(k && le32_to_cpu(ix->ei_block)
                                           <= le32_to_cpu(ix[-1].ei_block));
                        if (block < le32_to_cpu(ix->ei_block))
                                break;
                        chix = ix;
                }
                BUG_ON(chix != path->p_idx);
        }
#endif

}

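/*
 * Worked example (illustrative): for an index node whose ei_block keys
 * are {0, 100, 200, 300} and a lookup of block 150, the search runs on
 * [first+1, last], halving until l points at the key 200; p_idx = l - 1
 * then selects the entry for 100, the rightmost index <= 150.
 */
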
/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
                struct ext4_ext_path *path, ext4_lblk_t block)
{
        struct ext4_extent_header *eh = path->p_hdr;
        struct ext4_extent *r, *l, *m;

        if (eh->eh_entries == 0) {
                /*
                 * this leaf is empty:
                 * we get such a leaf in split/add case
                 */
                return;
        }

        ext_debug("binsearch for %u:  ", block);

        l = EXT_FIRST_EXTENT(eh) + 1;
        r = EXT_LAST_EXTENT(eh);

        while (l <= r) {
                m = l + (r - l) / 2;
                if (block < le32_to_cpu(m->ee_block))
                        r = m - 1;
                else
                        l = m + 1;
                ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
                                m, le32_to_cpu(m->ee_block),
                                r, le32_to_cpu(r->ee_block));
        }

        path->p_ext = l - 1;
        ext_debug("  -> %d:%llu:[%d]%d ",
                        le32_to_cpu(path->p_ext->ee_block),
                        ext4_ext_pblock(path->p_ext),
                        ext4_ext_is_uninitialized(path->p_ext),
                        ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
        {
                struct ext4_extent *chex, *ex;
                int k;

                chex = ex = EXT_FIRST_EXTENT(eh);
                for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
                        BUG_ON(k && le32_to_cpu(ex->ee_block)
                                          <= le32_to_cpu(ex[-1].ee_block));
                        if (block < le32_to_cpu(ex->ee_block))
                                break;
                        chex = ex;
                }
                BUG_ON(chex != path->p_ext);
        }
#endif

}

int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
        struct ext4_extent_header *eh;

        eh = ext_inode_hdr(inode);
        eh->eh_depth = 0;
        eh->eh_entries = 0;
        eh->eh_magic = EXT4_EXT_MAGIC;
        eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
        ext4_mark_inode_dirty(handle, inode);
        ext4_ext_invalidate_cache(inode);
        return 0;
}

struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
                                        struct ext4_ext_path *path)
{
        struct ext4_extent_header *eh;
        struct buffer_head *bh;
        short int depth, i, ppos = 0, alloc = 0;

        eh = ext_inode_hdr(inode);
        depth = ext_depth(inode);

        /* account possible depth increase */
        if (!path) {
                path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
                                GFP_NOFS);
                if (!path)
                        return ERR_PTR(-ENOMEM);
                alloc = 1;
        }
        path[0].p_hdr = eh;
        path[0].p_bh = NULL;

        i = depth;
        /* walk through the tree */
        while (i) {
                ext_debug("depth %d: num %d, max %d\n",
                          ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

                ext4_ext_binsearch_idx(inode, path + ppos, block);
                path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
                path[ppos].p_depth = i;
                path[ppos].p_ext = NULL;

                bh = sb_getblk(inode->i_sb, path[ppos].p_block);
                if (unlikely(!bh))
                        goto err;
                if (!bh_uptodate_or_lock(bh)) {
                        trace_ext4_ext_load_extent(inode, block,
                                                path[ppos].p_block);
                        if (bh_submit_read(bh) < 0) {
                                put_bh(bh);
                                goto err;
                        }
                }
                eh = ext_block_hdr(bh);
                ppos++;
                if (unlikely(ppos > depth)) {
                        put_bh(bh);
                        EXT4_ERROR_INODE(inode,
                                         "ppos %d > depth %d", ppos, depth);
                        goto err;
                }
                path[ppos].p_bh = bh;
                path[ppos].p_hdr = eh;
                i--;

                if (ext4_ext_check_block(inode, eh, i, bh))
                        goto err;
        }

        path[ppos].p_depth = i;
        path[ppos].p_ext = NULL;
        path[ppos].p_idx = NULL;

        /* find extent */
        ext4_ext_binsearch(inode, path + ppos, block);
        /* if not an empty leaf */
        if (path[ppos].p_ext)
                path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

        ext4_ext_show_path(inode, path);

        return path;

err:
        ext4_ext_drop_refs(path);
        if (alloc)
                kfree(path);
        return ERR_PTR(-EIO);
}

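/*
 * Illustrative caller sketch (an assumption about typical usage, not
 * original code): a lookup allocates the path, inspects the leaf, and
 * must drop the buffer references when done:
 *
 *	path = ext4_ext_find_extent(inode, lblk, NULL);
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	ex = path[ext_depth(inode)].p_ext;	/# NULL for an empty leaf #/
 *	...
 *	ext4_ext_drop_refs(path);
 *	kfree(path);
 */
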
/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
                                 struct ext4_ext_path *curp,
                                 int logical, ext4_fsblk_t ptr)
{
        struct ext4_extent_idx *ix;
        int len, err;

        err = ext4_ext_get_access(handle, inode, curp);
        if (err)
                return err;

        if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
                EXT4_ERROR_INODE(inode,
                                 "logical %d == ei_block %d!",
                                 logical, le32_to_cpu(curp->p_idx->ei_block));
                return -EIO;
        }

        if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
                             >= le16_to_cpu(curp->p_hdr->eh_max))) {
                EXT4_ERROR_INODE(inode,
                                 "eh_entries %d >= eh_max %d!",
                                 le16_to_cpu(curp->p_hdr->eh_entries),
                                 le16_to_cpu(curp->p_hdr->eh_max));
                return -EIO;
        }

        if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
                /* insert after */
                ext_debug("insert new index %d after: %llu\n", logical, ptr);
                ix = curp->p_idx + 1;
        } else {
                /* insert before */
                ext_debug("insert new index %d before: %llu\n", logical, ptr);
                ix = curp->p_idx;
        }

        len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
        BUG_ON(len < 0);
        if (len > 0) {
                ext_debug("insert new index %d: "
                                "move %d indices from 0x%p to 0x%p\n",
                                logical, len, ix, ix + 1);
                memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
        }

        if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
                EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
                return -EIO;
        }

        ix->ei_block = cpu_to_le32(logical);
        ext4_idx_store_pblock(ix, ptr);
        le16_add_cpu(&curp->p_hdr->eh_entries, 1);

        if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
                EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
                return -EIO;
        }

        err = ext4_ext_dirty(handle, inode, curp);
        ext4_std_error(inode->i_sb, err);

        return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
                          unsigned int flags,
                          struct ext4_ext_path *path,
                          struct ext4_extent *newext, int at)
{
        struct buffer_head *bh = NULL;
        int depth = ext_depth(inode);
        struct ext4_extent_header *neh;
        struct ext4_extent_idx *fidx;
        int i = at, k, m, a;
        ext4_fsblk_t newblock, oldblock;
        __le32 border;
        ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
        int err = 0;

        /* make decision: where to split? */
        /* FIXME: now decision is simplest: at current extent */

        /* if current leaf will be split, then we should use
         * border from split point */
        if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
                EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
                return -EIO;
        }
        if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
                border = path[depth].p_ext[1].ee_block;
                ext_debug("leaf will be split."
                                " next leaf starts at %d\n",
                                  le32_to_cpu(border));
        } else {
                border = newext->ee_block;
                ext_debug("leaf will be added."
                                " next leaf starts at %d\n",
                                le32_to_cpu(border));
        }

        /*
         * If an error occurs, we break off processing and mark the
         * filesystem read-only: the new index won't have been inserted,
         * so the tree is left in a consistent state, and the next mount
         * will repair the buffers too.
         */

        /*
         * Get an array to track all allocated blocks,
         * so that we can free them in the error path.
         */
        ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
        if (!ablocks)
                return -ENOMEM;

        /* allocate all needed blocks */
        ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
        for (a = 0; a < depth - at; a++) {
                newblock = ext4_ext_new_meta_block(handle, inode, path,
                                                   newext, &err, flags);
                if (newblock == 0)
                        goto cleanup;
                ablocks[a] = newblock;
        }

        /* initialize new leaf */
        newblock = ablocks[--a];
        if (unlikely(newblock == 0)) {
                EXT4_ERROR_INODE(inode, "newblock == 0!");
                err = -EIO;
                goto cleanup;
        }
        bh = sb_getblk(inode->i_sb, newblock);
        if (!bh) {
                err = -EIO;
                goto cleanup;
        }
        lock_buffer(bh);

        err = ext4_journal_get_create_access(handle, bh);
        if (err)
                goto cleanup;

        neh = ext_block_hdr(bh);
        neh->eh_entries = 0;
        neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
        neh->eh_magic = EXT4_EXT_MAGIC;
        neh->eh_depth = 0;

        /* move remainder of path[depth] to the new leaf */
        if (unlikely(path[depth].p_hdr->eh_entries !=
                     path[depth].p_hdr->eh_max)) {
                EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
                                 path[depth].p_hdr->eh_entries,
                                 path[depth].p_hdr->eh_max);
                err = -EIO;
                goto cleanup;
        }
        /* start copy from next extent */
        m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
        ext4_ext_show_move(inode, path, newblock, depth);
        if (m) {
                struct ext4_extent *ex;
                ex = EXT_FIRST_EXTENT(neh);
                memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
                le16_add_cpu(&neh->eh_entries, m);
        }

        ext4_extent_block_csum_set(inode, neh);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        err = ext4_handle_dirty_metadata(handle, inode, bh);
        if (err)
                goto cleanup;
        brelse(bh);
        bh = NULL;

        /* correct old leaf */
        if (m) {
                err = ext4_ext_get_access(handle, inode, path + depth);
                if (err)
                        goto cleanup;
                le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
                err = ext4_ext_dirty(handle, inode, path + depth);
                if (err)
                        goto cleanup;

        }

        /* create intermediate indexes */
        k = depth - at - 1;
        if (unlikely(k < 0)) {
                EXT4_ERROR_INODE(inode, "k %d < 0!", k);
                err = -EIO;
                goto cleanup;
        }
        if (k)
                ext_debug("create %d intermediate indices\n", k);
        /* insert new index into current index block */
        /* current depth stored in i var */
        i = depth - 1;
        while (k--) {
                oldblock = newblock;
                newblock = ablocks[--a];
                bh = sb_getblk(inode->i_sb, newblock);
                if (!bh) {
                        err = -EIO;
                        goto cleanup;
                }
                lock_buffer(bh);

                err = ext4_journal_get_create_access(handle, bh);
                if (err)
                        goto cleanup;

                neh = ext_block_hdr(bh);
                neh->eh_entries = cpu_to_le16(1);
                neh->eh_magic = EXT4_EXT_MAGIC;
                neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
                neh->eh_depth = cpu_to_le16(depth - i);
                fidx = EXT_FIRST_INDEX(neh);
                fidx->ei_block = border;
                ext4_idx_store_pblock(fidx, oldblock);

                ext_debug("int.index at %d (block %llu): %u -> %llu\n",
                                i, newblock, le32_to_cpu(border), oldblock);

                /* move remainder of path[i] to the new index block */
                if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
                                        EXT_LAST_INDEX(path[i].p_hdr))) {
                        EXT4_ERROR_INODE(inode,
                                         "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
                                         le32_to_cpu(path[i].p_ext->ee_block));
                        err = -EIO;
                        goto cleanup;
                }
                /* start copy indexes */
                m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
                ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
                                EXT_MAX_INDEX(path[i].p_hdr));
                ext4_ext_show_move(inode, path, newblock, i);
                if (m) {
                        memmove(++fidx, path[i].p_idx,
                                sizeof(struct ext4_extent_idx) * m);
                        le16_add_cpu(&neh->eh_entries, m);
                }
                ext4_extent_block_csum_set(inode, neh);
                set_buffer_uptodate(bh);
                unlock_buffer(bh);

                err = ext4_handle_dirty_metadata(handle, inode, bh);
                if (err)
                        goto cleanup;
                brelse(bh);
                bh = NULL;

                /* correct old index */
                if (m) {
                        err = ext4_ext_get_access(handle, inode, path + i);
                        if (err)
                                goto cleanup;
                        le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
                        err = ext4_ext_dirty(handle, inode, path + i);
                        if (err)
                                goto cleanup;
                }

                i--;
        }

        /* insert new index */
        err = ext4_ext_insert_index(handle, inode, path + at,
                                    le32_to_cpu(border), newblock);

cleanup:
        if (bh) {
                if (buffer_locked(bh))
                        unlock_buffer(bh);
                brelse(bh);
        }

        if (err) {
                /* free all allocated blocks in error case */
                for (i = 0; i < depth; i++) {
                        if (!ablocks[i])
                                continue;
                        ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
                                         EXT4_FREE_BLOCKS_METADATA);
                }
        }
        kfree(ablocks);

        return err;
}

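/*
 * Worked example (illustrative): for a tree of depth 2 with a free
 * index slot at depth "at" == 1, ext4_ext_split() allocates
 * depth - at == 1 new block: just the new leaf.  With at == 0 it would
 * allocate two blocks (a leaf plus one intermediate index block) and
 * link them under a new entry in the root.
 */
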
/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
                                 unsigned int flags,
                                 struct ext4_extent *newext)
{
        struct ext4_extent_header *neh;
        struct buffer_head *bh;
        ext4_fsblk_t newblock;
        int err = 0;

        newblock = ext4_ext_new_meta_block(handle, inode, NULL,
                newext, &err, flags);
        if (newblock == 0)
                return err;

        bh = sb_getblk(inode->i_sb, newblock);
        if (!bh) {
                err = -EIO;
                ext4_std_error(inode->i_sb, err);
                return err;
        }
        lock_buffer(bh);

        err = ext4_journal_get_create_access(handle, bh);
        if (err) {
                unlock_buffer(bh);
                goto out;
        }

        /* move top-level index/leaf into new block */
        memmove(bh->b_data, EXT4_I(inode)->i_data,
                sizeof(EXT4_I(inode)->i_data));

        /* set size of new block */
        neh = ext_block_hdr(bh);
        /* old root could have indexes or leaves
         * so calculate eh_max the right way */
        if (ext_depth(inode))
                neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
        else
                neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
        neh->eh_magic = EXT4_EXT_MAGIC;
        ext4_extent_block_csum_set(inode, neh);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        err = ext4_handle_dirty_metadata(handle, inode, bh);
        if (err)
                goto out;

        /* Update top-level index: num,max,pointer */
        neh = ext_inode_hdr(inode);
        neh->eh_entries = cpu_to_le16(1);
        ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
        if (neh->eh_depth == 0) {
                /* Root extent block becomes index block */
                neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
                EXT_FIRST_INDEX(neh)->ei_block =
                        EXT_FIRST_EXTENT(neh)->ee_block;
        }
        ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
                  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
                  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
                  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

        neh->eh_depth = cpu_to_le16(le16_to_cpu(neh->eh_depth) + 1);
        ext4_mark_inode_dirty(handle, inode);
out:
        brelse(bh);

        return err;
}

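/*
 * Worked example (illustrative): growing a depth-0 tree copies the
 * root extents from i_data into the freshly allocated block, turns the
 * root into a one-entry index (eh_entries = 1, eh_depth = 1) pointing
 * at that block, and keys the new index entry with the first extent's
 * ee_block.
 */
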
/*
 * ext4_ext_create_new_leaf:
 * finds an empty index slot and adds a new leaf;
 * if no free index entry is found, the tree is grown in depth instead.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
                                    unsigned int flags,
                                    struct ext4_ext_path *path,
                                    struct ext4_extent *newext)
{
        struct ext4_ext_path *curp;
        int depth, i, err = 0;

repeat:
        i = depth = ext_depth(inode);

        /* walk up the tree looking for a free index entry */
        curp = path + depth;
        while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
                i--;
                curp--;
        }

        /* we use already allocated block for index block,
         * so subsequent data blocks should be contiguous */
        if (EXT_HAS_FREE_INDEX(curp)) {
                /* if we found index with free entry, then use that
                 * entry: create all needed subtree and add new leaf */
                err = ext4_ext_split(handle, inode, flags, path, newext, i);
                if (err)
                        goto out;

                /* refill path */
                ext4_ext_drop_refs(path);
                path = ext4_ext_find_extent(inode,
                                    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
                                    path);
                if (IS_ERR(path))
                        err = PTR_ERR(path);
        } else {
                /* tree is full, time to grow in depth */
                err = ext4_ext_grow_indepth(handle, inode, flags, newext);
                if (err)
                        goto out;

                /* refill path */
                ext4_ext_drop_refs(path);
                path = ext4_ext_find_extent(inode,
                                   (ext4_lblk_t)le32_to_cpu(newext->ee_block),
                                    path);
                if (IS_ERR(path)) {
                        err = PTR_ERR(path);
                        goto out;
                }

                /*
                 * only the first grow (depth 0 -> 1) produces free space;
                 * in all other cases we have to split the grown tree
                 */
                depth = ext_depth(inode);
                if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
                        /* now we need to split */
                        goto repeat;
                }
        }

out:
        return err;
}

/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys;
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_left(struct inode *inode,
                                struct ext4_ext_path *path,
                                ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
        struct ext4_extent_idx *ix;
        struct ext4_extent *ex;
        int depth, ee_len;

        if (unlikely(path == NULL)) {
                EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
                return -EIO;
        }
        depth = path->p_depth;
        *phys = 0;

        if (depth == 0 && path->p_ext == NULL)
                return 0;

        /* usually the extent in the path covers blocks smaller
         * than *logical, but it can be that the extent is the
         * first one in the file */

        ex = path[depth].p_ext;
        ee_len = ext4_ext_get_actual_len(ex);
        if (*logical < le32_to_cpu(ex->ee_block)) {
                if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
                        EXT4_ERROR_INODE(inode,
                                         "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
                                         *logical, le32_to_cpu(ex->ee_block));
                        return -EIO;
                }
                while (--depth >= 0) {
                        ix = path[depth].p_idx;
                        if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
                                EXT4_ERROR_INODE(inode,
                                  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
                                  ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
                                  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
                le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
                                  depth);
                                return -EIO;
                        }
                }
                return 0;
        }

        if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
                EXT4_ERROR_INODE(inode,
                                 "logical %d < ee_block %d + ee_len %d!",
                                 *logical, le32_to_cpu(ex->ee_block), ee_len);
                return -EIO;
        }

        *logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
        *phys = ext4_ext_pblock(ex) + ee_len - 1;
        return 0;
}

/*
 * search the closest allocated block to the right for *logical
 * and returns it at @logical + its physical address at @phys;
 * if *logical is the largest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_right(struct inode *inode,
                                 struct ext4_ext_path *path,
                                 ext4_lblk_t *logical, ext4_fsblk_t *phys,
                                 struct ext4_extent **ret_ex)
{
        struct buffer_head *bh = NULL;
        struct ext4_extent_header *eh;
        struct ext4_extent_idx *ix;
        struct ext4_extent *ex;
        ext4_fsblk_t block;
        int depth;      /* Note, NOT eh_depth; depth from top of tree */
        int ee_len;

        if (unlikely(path == NULL)) {
                EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
                return -EIO;
        }
        depth = path->p_depth;
        *phys = 0;

        if (depth == 0 && path->p_ext == NULL)
                return 0;

        /* usually the extent in the path covers blocks smaller
         * than *logical, but it can be that the extent is the
         * first one in the file */

        ex = path[depth].p_ext;
        ee_len = ext4_ext_get_actual_len(ex);
        if (*logical < le32_to_cpu(ex->ee_block)) {
                if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
                        EXT4_ERROR_INODE(inode,
                                         "first_extent(path[%d].p_hdr) != ex",
                                         depth);
                        return -EIO;
                }
                while (--depth >= 0) {
                        ix = path[depth].p_idx;
                        if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
                                EXT4_ERROR_INODE(inode,
                                                 "ix != EXT_FIRST_INDEX *logical %d!",
                                                 *logical);
                                return -EIO;
                        }
                }
                goto found_extent;
        }

        if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
                EXT4_ERROR_INODE(inode,
                                 "logical %d < ee_block %d + ee_len %d!",
                                 *logical, le32_to_cpu(ex->ee_block), ee_len);
                return -EIO;
        }

        if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
                /* next allocated block in this leaf */
                ex++;
                goto found_extent;
        }

        /* go up and search for index to the right */
        while (--depth >= 0) {
                ix = path[depth].p_idx;
                if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
                        goto got_index;
        }

        /* we've gone up to the root and found no index to the right */
        return 0;

got_index:
        /* we've found index to the right, let's
         * follow it and find the closest allocated
         * block to the right */
        ix++;
        block = ext4_idx_pblock(ix);
        while (++depth < path->p_depth) {
                bh = sb_bread(inode->i_sb, block);
                if (bh == NULL)
                        return -EIO;
                eh = ext_block_hdr(bh);
                /* subtract from p_depth to get proper eh_depth */
                if (ext4_ext_check_block(inode, eh,
                                         path->p_depth - depth, bh)) {
                        put_bh(bh);
                        return -EIO;
                }
                ix = EXT_FIRST_INDEX(eh);
                block = ext4_idx_pblock(ix);
                put_bh(bh);
        }

        bh = sb_bread(inode->i_sb, block);
        if (bh == NULL)
                return -EIO;
        eh = ext_block_hdr(bh);
        if (ext4_ext_check_block(inode, eh, path->p_depth - depth, bh)) {
                put_bh(bh);
                return -EIO;
        }
        ex = EXT_FIRST_EXTENT(eh);
found_extent:
        *logical = le32_to_cpu(ex->ee_block);
        *phys = ext4_ext_pblock(ex);
        *ret_ex = ex;
        if (bh)
                put_bh(bh);
        return 0;
}

1441 /*
1442  * ext4_ext_next_allocated_block:
1443  * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
1444  * NOTE: it considers block number from index entry as
1445  * allocated block. Thus, index entries have to be consistent
1446  * with leaves.
1447  */
1448 static ext4_lblk_t
1449 ext4_ext_next_allocated_block(struct ext4_ext_path *path)
1450 {
1451         int depth;
1452
1453         BUG_ON(path == NULL);
1454         depth = path->p_depth;
1455
1456         if (depth == 0 && path->p_ext == NULL)
1457                 return EXT_MAX_BLOCKS;
1458
1459         while (depth >= 0) {
1460                 if (depth == path->p_depth) {
1461                         /* leaf */
1462                         if (path[depth].p_ext &&
1463                                 path[depth].p_ext !=
1464                                         EXT_LAST_EXTENT(path[depth].p_hdr))
1465                           return le32_to_cpu(path[depth].p_ext[1].ee_block);
1466                 } else {
1467                         /* index */
1468                         if (path[depth].p_idx !=
1469                                         EXT_LAST_INDEX(path[depth].p_hdr))
1470                           return le32_to_cpu(path[depth].p_idx[1].ei_block);
1471                 }
1472                 depth--;
1473         }
1474
1475         return EXT_MAX_BLOCKS;
1476 }
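
/*
 * A minimal usage sketch: stepping through the allocated ranges of a
 * file with ext4_ext_next_allocated_block().  The helper name is
 * hypothetical, and locking and error handling are elided.
 */
#if 0
static void sketch_walk_allocated(struct inode *inode)
{
	struct ext4_ext_path *path;
	ext4_lblk_t block = 0;

	while (block != EXT_MAX_BLOCKS) {
		path = ext4_ext_find_extent(inode, block, NULL);
		if (IS_ERR(path))
			break;
		ext_debug("allocated range lookup at %u\n", block);
		block = ext4_ext_next_allocated_block(path);
		ext4_ext_drop_refs(path);
		kfree(path);
	}
}
#endif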
1477
1478 /*
1479  * ext4_ext_next_leaf_block:
1480  * returns first allocated block from next leaf or EXT_MAX_BLOCKS
1481  */
1482 static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
1483 {
1484         int depth;
1485
1486         BUG_ON(path == NULL);
1487         depth = path->p_depth;
1488
1489         /* a zero-depth tree has no leaf blocks at all */
1490         if (depth == 0)
1491                 return EXT_MAX_BLOCKS;
1492
1493         /* go to index block */
1494         depth--;
1495
1496         while (depth >= 0) {
1497                 if (path[depth].p_idx !=
1498                                 EXT_LAST_INDEX(path[depth].p_hdr))
1499                         return (ext4_lblk_t)
1500                                 le32_to_cpu(path[depth].p_idx[1].ei_block);
1501                 depth--;
1502         }
1503
1504         return EXT_MAX_BLOCKS;
1505 }
1506
1507 /*
1508  * ext4_ext_correct_indexes:
1509  * if a leaf gets modified and the modified extent is first in the leaf,
1510  * then we have to correct all indexes above.
1511  * TODO: do we need to correct the tree in all cases?
1512  */
1513 static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
1514                                 struct ext4_ext_path *path)
1515 {
1516         struct ext4_extent_header *eh;
1517         int depth = ext_depth(inode);
1518         struct ext4_extent *ex;
1519         __le32 border;
1520         int k, err = 0;
1521
1522         eh = path[depth].p_hdr;
1523         ex = path[depth].p_ext;
1524
1525         if (unlikely(ex == NULL || eh == NULL)) {
1526                 EXT4_ERROR_INODE(inode,
1527                                  "ex %p == NULL or eh %p == NULL", ex, eh);
1528                 return -EIO;
1529         }
1530
1531         if (depth == 0) {
1532                 /* there is no tree at all */
1533                 return 0;
1534         }
1535
1536         if (ex != EXT_FIRST_EXTENT(eh)) {
1537                 /* we correct the tree only if the first extent was modified */
1538                 return 0;
1539         }
1540
1541         /*
1542          * TODO: we need correction if border is smaller than current one
1543          */
1544         k = depth - 1;
1545         border = path[depth].p_ext->ee_block;
1546         err = ext4_ext_get_access(handle, inode, path + k);
1547         if (err)
1548                 return err;
1549         path[k].p_idx->ei_block = border;
1550         err = ext4_ext_dirty(handle, inode, path + k);
1551         if (err)
1552                 return err;
1553
1554         while (k--) {
1555                 /* change all left-side indexes */
1556                 if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
1557                         break;
1558                 err = ext4_ext_get_access(handle, inode, path + k);
1559                 if (err)
1560                         break;
1561                 path[k].p_idx->ei_block = border;
1562                 err = ext4_ext_dirty(handle, inode, path + k);
1563                 if (err)
1564                         break;
1565         }
1566
1567         return err;
1568 }
1569
1570 int
1571 ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
1572                                 struct ext4_extent *ex2)
1573 {
1574         unsigned short ext1_ee_len, ext2_ee_len, max_len;
1575
1576         /*
1577          * Make sure that either both extents are uninitialized, or
1578          * both are _not_.
1579          */
1580         if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
1581                 return 0;
1582
1583         if (ext4_ext_is_uninitialized(ex1))
1584                 max_len = EXT_UNINIT_MAX_LEN;
1585         else
1586                 max_len = EXT_INIT_MAX_LEN;
1587
1588         ext1_ee_len = ext4_ext_get_actual_len(ex1);
1589         ext2_ee_len = ext4_ext_get_actual_len(ex2);
1590
1591         if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
1592                         le32_to_cpu(ex2->ee_block))
1593                 return 0;
1594
1595         /*
1596          * To allow future support for preallocated extents to be added
1597  * as an RO_COMPAT feature, refuse to merge two extents if
1598          * this can result in the top bit of ee_len being set.
1599          */
1600         if (ext1_ee_len + ext2_ee_len > max_len)
1601                 return 0;
1602 #ifdef AGGRESSIVE_TEST
1603         if (ext1_ee_len >= 4)
1604                 return 0;
1605 #endif
1606
1607         if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
1608                 return 1;
1609         return 0;
1610 }
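
/*
 * A worked sketch of the merge test (illustrative values, assuming
 * AGGRESSIVE_TEST is not defined): two initialized extents merge only
 * when they are logically and physically contiguous and the combined
 * length still fits in ee_len.
 */
#if 0
static void sketch_merge_check(struct inode *inode)
{
	struct ext4_extent ex1 = { .ee_block = cpu_to_le32(100),
				   .ee_len   = cpu_to_le16(4) };
	struct ext4_extent ex2 = { .ee_block = cpu_to_le32(104),
				   .ee_len   = cpu_to_le16(8) };

	ext4_ext_store_pblock(&ex1, 500);	/* covers pblocks 500..503 */
	ext4_ext_store_pblock(&ex2, 504);	/* starts right after ex1 */

	/* logical 100 + 4 == 104 and physical 500 + 4 == 504: mergeable */
	WARN_ON(!ext4_can_extents_be_merged(inode, &ex1, &ex2));

	ext4_ext_store_pblock(&ex2, 505);	/* one-block physical gap */
	WARN_ON(ext4_can_extents_be_merged(inode, &ex1, &ex2));
}
#endif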
1611
1612 /*
1613  * This function tries to merge the "ex" extent with the next extent in the tree.
1614  * It always tries to merge towards the right. If you want to merge towards
1615  * the left, pass "ex - 1" as the argument instead of "ex".
1616  * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
1617  * 1 if they got merged.
1618  */
1619 static int ext4_ext_try_to_merge_right(struct inode *inode,
1620                                  struct ext4_ext_path *path,
1621                                  struct ext4_extent *ex)
1622 {
1623         struct ext4_extent_header *eh;
1624         unsigned int depth, len;
1625         int merge_done = 0;
1626         int uninitialized = 0;
1627
1628         depth = ext_depth(inode);
1629         BUG_ON(path[depth].p_hdr == NULL);
1630         eh = path[depth].p_hdr;
1631
1632         while (ex < EXT_LAST_EXTENT(eh)) {
1633                 if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
1634                         break;
1635                 /* merge with next extent! */
1636                 if (ext4_ext_is_uninitialized(ex))
1637                         uninitialized = 1;
1638                 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1639                                 + ext4_ext_get_actual_len(ex + 1));
1640                 if (uninitialized)
1641                         ext4_ext_mark_uninitialized(ex);
1642
1643                 if (ex + 1 < EXT_LAST_EXTENT(eh)) {
1644                         len = (EXT_LAST_EXTENT(eh) - ex - 1)
1645                                 * sizeof(struct ext4_extent);
1646                         memmove(ex + 1, ex + 2, len);
1647                 }
1648                 le16_add_cpu(&eh->eh_entries, -1);
1649                 merge_done = 1;
1650                 WARN_ON(eh->eh_entries == 0);
1651                 if (!eh->eh_entries)
1652                         EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
1653         }
1654
1655         return merge_done;
1656 }
1657
1658 /*
1659  * This function tries to merge the @ex extent with its neighbours in the tree.
1660  * Returns 1 if @ex was merged with the next extent, 0 otherwise.
1661  */
1662 static int ext4_ext_try_to_merge(struct inode *inode,
1663                                   struct ext4_ext_path *path,
1664                                   struct ext4_extent *ex)
{
1665         struct ext4_extent_header *eh;
1666         unsigned int depth;
1667         int merge_done = 0;
1668         int ret = 0;
1669
1670         depth = ext_depth(inode);
1671         BUG_ON(path[depth].p_hdr == NULL);
1672         eh = path[depth].p_hdr;
1673
1674         if (ex > EXT_FIRST_EXTENT(eh))
1675                 merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
1676
1677         if (!merge_done)
1678                 ret = ext4_ext_try_to_merge_right(inode, path, ex);
1679
1680         return ret;
1681 }
1682
1683 /*
1684  * check if a portion of the "newext" extent overlaps with an
1685  * existing extent.
1686  *
1687  * If an overlap is discovered, it updates the length of newext
1688  * such that there will be no overlap, and then returns 1.
1689  * If there is no overlap found, it returns 0.
1690  */
1691 static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
1692                                            struct inode *inode,
1693                                            struct ext4_extent *newext,
1694                                            struct ext4_ext_path *path)
1695 {
1696         ext4_lblk_t b1, b2;
1697         unsigned int depth, len1;
1698         unsigned int ret = 0;
1699
1700         b1 = le32_to_cpu(newext->ee_block);
1701         len1 = ext4_ext_get_actual_len(newext);
1702         depth = ext_depth(inode);
1703         if (!path[depth].p_ext)
1704                 goto out;
1705         b2 = le32_to_cpu(path[depth].p_ext->ee_block);
1706         b2 &= ~(sbi->s_cluster_ratio - 1);
1707
1708         /*
1709          * get the next allocated block if the extent in the path
1710          * is before the requested block(s)
1711          */
1712         if (b2 < b1) {
1713                 b2 = ext4_ext_next_allocated_block(path);
1714                 if (b2 == EXT_MAX_BLOCKS)
1715                         goto out;
1716                 b2 &= ~(sbi->s_cluster_ratio - 1);
1717         }
1718
1719         /* check for wrap through zero on extent logical start block */
1720         if (b1 + len1 < b1) {
1721                 len1 = EXT_MAX_BLOCKS - b1;
1722                 newext->ee_len = cpu_to_le16(len1);
1723                 ret = 1;
1724         }
1725
1726         /* check for overlap */
1727         if (b1 + len1 > b2) {
1728                 newext->ee_len = cpu_to_le16(b2 - b1);
1729                 ret = 1;
1730         }
1731 out:
1732         return ret;
1733 }
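
/*
 * Worked example (illustrative numbers): if newext starts at b1 = 50
 * with len1 = 20 (covering 50..69) and the existing extent in the path
 * begins at b2 = 60, then b1 + len1 > b2, so newext->ee_len is trimmed
 * to b2 - b1 = 10 blocks (50..59) and 1 is returned.  On bigalloc file
 * systems b2 is first rounded down to a cluster boundary.
 */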
1734
1735 /*
1736  * ext4_ext_insert_extent:
1737  * tries to merge the requested extent into an existing extent or
1738  * inserts the requested extent as a new one into the tree,
1739  * creating a new leaf in the no-space case.
1740  */
1741 int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1742                                 struct ext4_ext_path *path,
1743                                 struct ext4_extent *newext, int flag)
1744 {
1745         struct ext4_extent_header *eh;
1746         struct ext4_extent *ex, *fex;
1747         struct ext4_extent *nearex; /* nearest extent */
1748         struct ext4_ext_path *npath = NULL;
1749         int depth, len, err;
1750         ext4_lblk_t next;
1751         unsigned uninitialized = 0;
1752         int flags = 0;
1753
1754         if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
1755                 EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
1756                 return -EIO;
1757         }
1758         depth = ext_depth(inode);
1759         ex = path[depth].p_ext;
1760         if (unlikely(path[depth].p_hdr == NULL)) {
1761                 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1762                 return -EIO;
1763         }
1764
1765         /* try to insert block into found extent and return */
1766         if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
1767                 && ext4_can_extents_be_merged(inode, ex, newext)) {
1768                 ext_debug("append [%d]%d block to %u:[%d]%d (from %llu)\n",
1769                           ext4_ext_is_uninitialized(newext),
1770                           ext4_ext_get_actual_len(newext),
1771                           le32_to_cpu(ex->ee_block),
1772                           ext4_ext_is_uninitialized(ex),
1773                           ext4_ext_get_actual_len(ex),
1774                           ext4_ext_pblock(ex));
1775                 err = ext4_ext_get_access(handle, inode, path + depth);
1776                 if (err)
1777                         return err;
1778
1779                 /*
1780                  * ext4_can_extents_be_merged should have checked that either
1781                  * both extents are uninitialized, or both aren't. Thus we
1782                  * need to check only one of them here.
1783                  */
1784                 if (ext4_ext_is_uninitialized(ex))
1785                         uninitialized = 1;
1786                 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1787                                         + ext4_ext_get_actual_len(newext));
1788                 if (uninitialized)
1789                         ext4_ext_mark_uninitialized(ex);
1790                 eh = path[depth].p_hdr;
1791                 nearex = ex;
1792                 goto merge;
1793         }
1794
1795         depth = ext_depth(inode);
1796         eh = path[depth].p_hdr;
1797         if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
1798                 goto has_space;
1799
1800         /* perhaps the next leaf has space for us? */
1801         fex = EXT_LAST_EXTENT(eh);
1802         next = EXT_MAX_BLOCKS;
1803         if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
1804                 next = ext4_ext_next_leaf_block(path);
1805         if (next != EXT_MAX_BLOCKS) {
1806                 ext_debug("next leaf block - %u\n", next);
1807                 BUG_ON(npath != NULL);
1808                 npath = ext4_ext_find_extent(inode, next, NULL);
1809                 if (IS_ERR(npath))
1810                         return PTR_ERR(npath);
1811                 BUG_ON(npath->p_depth != path->p_depth);
1812                 eh = npath[depth].p_hdr;
1813                 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
1814                         ext_debug("next leaf isn't full (%d)\n",
1815                                   le16_to_cpu(eh->eh_entries));
1816                         path = npath;
1817                         goto has_space;
1818                 }
1819                 ext_debug("next leaf has no free space (%d,%d)\n",
1820                           le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
1821         }
1822
1823         /*
1824          * There is no free space in the found leaf.
1825          * We're gonna add a new leaf in the tree.
1826          */
1827         if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT)
1828                 flags = EXT4_MB_USE_ROOT_BLOCKS;
1829         err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
1830         if (err)
1831                 goto cleanup;
1832         depth = ext_depth(inode);
1833         eh = path[depth].p_hdr;
1834
1835 has_space:
1836         nearex = path[depth].p_ext;
1837
1838         err = ext4_ext_get_access(handle, inode, path + depth);
1839         if (err)
1840                 goto cleanup;
1841
1842         if (!nearex) {
1843                 /* there is no extent in this leaf, create first one */
1844                 ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
1845                                 le32_to_cpu(newext->ee_block),
1846                                 ext4_ext_pblock(newext),
1847                                 ext4_ext_is_uninitialized(newext),
1848                                 ext4_ext_get_actual_len(newext));
1849                 nearex = EXT_FIRST_EXTENT(eh);
1850         } else {
1851                 if (le32_to_cpu(newext->ee_block)
1852                            > le32_to_cpu(nearex->ee_block)) {
1853                         /* Insert after */
1854                         ext_debug("insert %u:%llu:[%d]%d after: "
1855                                         "nearest %p\n",
1856                                         le32_to_cpu(newext->ee_block),
1857                                         ext4_ext_pblock(newext),
1858                                         ext4_ext_is_uninitialized(newext),
1859                                         ext4_ext_get_actual_len(newext),
1860                                         nearex);
1861                         nearex++;
1862                 } else {
1863                         /* Insert before */
1864                         BUG_ON(newext->ee_block == nearex->ee_block);
1865                         ext_debug("insert %u:%llu:[%d]%d before: "
1866                                         "nearest %p\n",
1867                                         le32_to_cpu(newext->ee_block),
1868                                         ext4_ext_pblock(newext),
1869                                         ext4_ext_is_uninitialized(newext),
1870                                         ext4_ext_get_actual_len(newext),
1871                                         nearex);
1872                 }
1873                 len = EXT_LAST_EXTENT(eh) - nearex + 1;
1874                 if (len > 0) {
1875                         ext_debug("insert %u:%llu:[%d]%d: "
1876                                         "move %d extents from 0x%p to 0x%p\n",
1877                                         le32_to_cpu(newext->ee_block),
1878                                         ext4_ext_pblock(newext),
1879                                         ext4_ext_is_uninitialized(newext),
1880                                         ext4_ext_get_actual_len(newext),
1881                                         len, nearex, nearex + 1);
1882                         memmove(nearex + 1, nearex,
1883                                 len * sizeof(struct ext4_extent));
1884                 }
1885         }
1886
1887         le16_add_cpu(&eh->eh_entries, 1);
1888         path[depth].p_ext = nearex;
1889         nearex->ee_block = newext->ee_block;
1890         ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
1891         nearex->ee_len = newext->ee_len;
1892
1893 merge:
1894         /* try to merge extents */
1895         if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
1896                 ext4_ext_try_to_merge(inode, path, nearex);
1897
1899         /* time to correct all indexes above */
1900         err = ext4_ext_correct_indexes(handle, inode, path);
1901         if (err)
1902                 goto cleanup;
1903
1904         err = ext4_ext_dirty(handle, inode, path + depth);
1905
1906 cleanup:
1907         if (npath) {
1908                 ext4_ext_drop_refs(npath);
1909                 kfree(npath);
1910         }
1911         ext4_ext_invalidate_cache(inode);
1912         return err;
1913 }
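
/*
 * A minimal sketch of the usual calling pattern (values are
 * illustrative; journal setup, locking and error paths are elided):
 */
#if 0
static int sketch_insert(handle_t *handle, struct inode *inode)
{
	struct ext4_ext_path *path;
	struct ext4_extent newex;
	int err;

	path = ext4_ext_find_extent(inode, 200, NULL);
	if (IS_ERR(path))
		return PTR_ERR(path);

	newex.ee_block = cpu_to_le32(200);	/* logical start */
	newex.ee_len = cpu_to_le16(8);		/* 8 blocks */
	ext4_ext_store_pblock(&newex, 1000);	/* physical start */

	err = ext4_ext_insert_extent(handle, inode, path, &newex, 0);
	ext4_ext_drop_refs(path);
	kfree(path);
	return err;
}
#endif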
1914
1915 static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
1916                                ext4_lblk_t num, ext_prepare_callback func,
1917                                void *cbdata)
1918 {
1919         struct ext4_ext_path *path = NULL;
1920         struct ext4_ext_cache cbex;
1921         struct ext4_extent *ex;
1922         ext4_lblk_t next, start = 0, end = 0;
1923         ext4_lblk_t last = block + num;
1924         int depth, exists, err = 0;
1925
1926         BUG_ON(func == NULL);
1927         BUG_ON(inode == NULL);
1928
1929         while (block < last && block != EXT_MAX_BLOCKS) {
1930                 num = last - block;
1931                 /* find extent for this block */
1932                 down_read(&EXT4_I(inode)->i_data_sem);
1933                 path = ext4_ext_find_extent(inode, block, path);
1934                 up_read(&EXT4_I(inode)->i_data_sem);
1935                 if (IS_ERR(path)) {
1936                         err = PTR_ERR(path);
1937                         path = NULL;
1938                         break;
1939                 }
1940
1941                 depth = ext_depth(inode);
1942                 if (unlikely(path[depth].p_hdr == NULL)) {
1943                         EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1944                         err = -EIO;
1945                         break;
1946                 }
1947                 ex = path[depth].p_ext;
1948                 next = ext4_ext_next_allocated_block(path);
1949
1950                 exists = 0;
1951                 if (!ex) {
1952                         /* there is no extent yet, so try to allocate
1953                          * all requested space */
1954                         start = block;
1955                         end = block + num;
1956                 } else if (le32_to_cpu(ex->ee_block) > block) {
1957                         /* need to allocate space before found extent */
1958                         start = block;
1959                         end = le32_to_cpu(ex->ee_block);
1960                         if (block + num < end)
1961                                 end = block + num;
1962                 } else if (block >= le32_to_cpu(ex->ee_block)
1963                                         + ext4_ext_get_actual_len(ex)) {
1964                         /* need to allocate space after found extent */
1965                         start = block;
1966                         end = block + num;
1967                         if (end >= next)
1968                                 end = next;
1969                 } else if (block >= le32_to_cpu(ex->ee_block)) {
1970                         /*
1971                          * some part of requested space is covered
1972                          * by found extent
1973                          */
1974                         start = block;
1975                         end = le32_to_cpu(ex->ee_block)
1976                                 + ext4_ext_get_actual_len(ex);
1977                         if (block + num < end)
1978                                 end = block + num;
1979                         exists = 1;
1980                 } else {
1981                         BUG();
1982                 }
1983                 BUG_ON(end <= start);
1984
1985                 if (!exists) {
1986                         cbex.ec_block = start;
1987                         cbex.ec_len = end - start;
1988                         cbex.ec_start = 0;
1989                 } else {
1990                         cbex.ec_block = le32_to_cpu(ex->ee_block);
1991                         cbex.ec_len = ext4_ext_get_actual_len(ex);
1992                         cbex.ec_start = ext4_ext_pblock(ex);
1993                 }
1994
1995                 if (unlikely(cbex.ec_len == 0)) {
1996                         EXT4_ERROR_INODE(inode, "cbex.ec_len == 0");
1997                         err = -EIO;
1998                         break;
1999                 }
2000                 err = func(inode, next, &cbex, ex, cbdata);
2001                 ext4_ext_drop_refs(path);
2002
2003                 if (err < 0)
2004                         break;
2005
2006                 if (err == EXT_REPEAT)
2007                         continue;
2008                 else if (err == EXT_BREAK) {
2009                         err = 0;
2010                         break;
2011                 }
2012
2013                 if (ext_depth(inode) != depth) {
2014                         /* depth was changed. we have to realloc path */
2015                         kfree(path);
2016                         path = NULL;
2017                 }
2018
2019                 block = cbex.ec_block + cbex.ec_len;
2020         }
2021
2022         if (path) {
2023                 ext4_ext_drop_refs(path);
2024                 kfree(path);
2025         }
2026
2027         return err;
2028 }
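
/*
 * A sketch of an ext_prepare_callback for ext4_ext_walk_space(); the
 * function name and messages are hypothetical.  An ec_start of 0 marks
 * a hole in this interface.  A caller would invoke it as
 * ext4_ext_walk_space(inode, 0, EXT_MAX_BLOCKS, sketch_show_range, NULL).
 */
#if 0
static int sketch_show_range(struct inode *inode, ext4_lblk_t next,
			     struct ext4_ext_cache *cbex,
			     struct ext4_extent *ex, void *cbdata)
{
	if (cbex->ec_start == 0)
		ext_debug("hole   %u..%u\n", cbex->ec_block,
			  cbex->ec_block + cbex->ec_len - 1);
	else
		ext_debug("extent %u..%u -> %llu\n", cbex->ec_block,
			  cbex->ec_block + cbex->ec_len - 1, cbex->ec_start);
	return EXT_CONTINUE;	/* or EXT_BREAK / EXT_REPEAT */
}
#endif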
2029
2030 static void
2031 ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
2032                         __u32 len, ext4_fsblk_t start)
2033 {
2034         struct ext4_ext_cache *cex;
2035         BUG_ON(len == 0);
2036         spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
2037         trace_ext4_ext_put_in_cache(inode, block, len, start);
2038         cex = &EXT4_I(inode)->i_cached_extent;
2039         cex->ec_block = block;
2040         cex->ec_len = len;
2041         cex->ec_start = start;
2042         spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
2043 }
2044
2045 /*
2046  * ext4_ext_put_gap_in_cache:
2047  * calculate boundaries of the gap that the requested block fits into
2048  * and cache this gap
2049  */
2050 static void
2051 ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
2052                                 ext4_lblk_t block)
2053 {
2054         int depth = ext_depth(inode);
2055         unsigned long len;
2056         ext4_lblk_t lblock;
2057         struct ext4_extent *ex;
2058
2059         ex = path[depth].p_ext;
2060         if (ex == NULL) {
2061                 /* there is no extent yet, so gap is [0;-] */
2062                 lblock = 0;
2063                 len = EXT_MAX_BLOCKS;
2064                 ext_debug("cache gap(whole file):");
2065         } else if (block < le32_to_cpu(ex->ee_block)) {
2066                 lblock = block;
2067                 len = le32_to_cpu(ex->ee_block) - block;
2068                 ext_debug("cache gap(before): %u [%u:%u]",
2069                                 block,
2070                                 le32_to_cpu(ex->ee_block),
2071                                  ext4_ext_get_actual_len(ex));
2072         } else if (block >= le32_to_cpu(ex->ee_block)
2073                         + ext4_ext_get_actual_len(ex)) {
2074                 ext4_lblk_t next;
2075                 lblock = le32_to_cpu(ex->ee_block)
2076                         + ext4_ext_get_actual_len(ex);
2077
2078                 next = ext4_ext_next_allocated_block(path);
2079                 ext_debug("cache gap(after): [%u:%u] %u",
2080                                 le32_to_cpu(ex->ee_block),
2081                                 ext4_ext_get_actual_len(ex),
2082                                 block);
2083                 BUG_ON(next == lblock);
2084                 len = next - lblock;
2085         } else {
2086                 lblock = len = 0;
2087                 BUG();
2088         }
2089
2090         ext_debug(" -> %u:%lu\n", lblock, len);
2091         ext4_ext_put_in_cache(inode, lblock, len, 0);
2092 }
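
/*
 * Worked example (illustrative): with extents covering 10..19 and
 * 40..49, a lookup of block 25 hits the "after" case above: the gap
 * starts at lblock = 10 + 10 = 20 and runs up to the next allocated
 * block 40, so a 20-block hole [20..39] is cached with start 0.
 */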
2093
2094 /*
2095  * ext4_ext_check_cache()
2096  * Checks to see if the given block is in the cache.
2097  * If it is, the cached extent is stored in the given
2098  * cache extent pointer.  If the cached extent is a hole,
2099  * this routine should be used instead of
2100  * ext4_ext_in_cache if the calling function needs to
2101  * know the size of the hole.
2102  *
2103  * @inode: The file's inode
2104  * @block: The block to look for in the cache
2105  * @ex:    Pointer where the cached extent will be stored
2106  *         if it contains block
2107  *
2108  * Return 0 if cache is invalid; 1 if the cache is valid
2109  */
2110 static int ext4_ext_check_cache(struct inode *inode, ext4_lblk_t block,
2111         struct ext4_ext_cache *ex)
{
2112         struct ext4_ext_cache *cex;
2113         struct ext4_sb_info *sbi;
2114         int ret = 0;
2115
2116         /*
2117          * We borrow i_block_reservation_lock to protect i_cached_extent
2118          */
2119         spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
2120         cex = &EXT4_I(inode)->i_cached_extent;
2121         sbi = EXT4_SB(inode->i_sb);
2122
2123         /* has cache valid data? */
2124         if (cex->ec_len == 0)
2125                 goto errout;
2126
2127         if (in_range(block, cex->ec_block, cex->ec_len)) {
2128                 memcpy(ex, cex, sizeof(struct ext4_ext_cache));
2129                 ext_debug("%u cached by %u:%u:%llu\n",
2130                                 block,
2131                                 cex->ec_block, cex->ec_len, cex->ec_start);
2132                 ret = 1;
2133         }
2134 errout:
2135         trace_ext4_ext_in_cache(inode, block, ret);
2136         spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
2137         return ret;
2138 }
2139
2140 /*
2141  * ext4_ext_in_cache()
2142  * Checks to see if the given block is in the cache.
2143  * If it is, the cached extent is stored in the given
2144  * extent pointer.
2145  *
2146  * @inode: The file's inode
2147  * @block: The block to look for in the cache
2148  * @ex:    Pointer where the cached extent will be stored
2149  *         if it contains block
2150  *
2151  * Return 0 if cache is invalid; 1 if the cache is valid
2152  */
2153 static int
2154 ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
2155                         struct ext4_extent *ex)
2156 {
2157         struct ext4_ext_cache cex;
2158         int ret = 0;
2159
2160         if (ext4_ext_check_cache(inode, block, &cex)) {
2161                 ex->ee_block = cpu_to_le32(cex.ec_block);
2162                 ext4_ext_store_pblock(ex, cex.ec_start);
2163                 ex->ee_len = cpu_to_le16(cex.ec_len);
2164                 ret = 1;
2165         }
2166
2167         return ret;
2168 }
2169
2171 /*
2172  * ext4_ext_rm_idx:
2173  * removes index from the index block.
2174  */
2175 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
2176                         struct ext4_ext_path *path)
2177 {
2178         int err;
2179         ext4_fsblk_t leaf;
2180
2181         /* free index block */
2182         path--;
2183         leaf = ext4_idx_pblock(path->p_idx);
2184         if (unlikely(path->p_hdr->eh_entries == 0)) {
2185                 EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
2186                 return -EIO;
2187         }
2188         err = ext4_ext_get_access(handle, inode, path);
2189         if (err)
2190                 return err;
2191
2192         if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
2193                 int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
2194                 len *= sizeof(struct ext4_extent_idx);
2195                 memmove(path->p_idx, path->p_idx + 1, len);
2196         }
2197
2198         le16_add_cpu(&path->p_hdr->eh_entries, -1);
2199         err = ext4_ext_dirty(handle, inode, path);
2200         if (err)
2201                 return err;
2202         ext_debug("index is empty, remove it, free block %llu\n", leaf);
2203         trace_ext4_ext_rm_idx(inode, leaf);
2204
2205         ext4_free_blocks(handle, inode, NULL, leaf, 1,
2206                          EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
2207         return err;
2208 }
2209
2210 /*
2211  * ext4_ext_calc_credits_for_single_extent:
2212  * This routine returns max. credits that needed to insert an extent
2213  * to the extent tree.
2214  * When pass the actual path, the caller should calculate credits
2215  * under i_data_sem.
2216  */
2217 int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
2218                                                 struct ext4_ext_path *path)
2219 {
2220         if (path) {
2221                 int depth = ext_depth(inode);
2222                 int ret = 0;
2223
2224                 /* perhaps there is space in the leaf? */
2225                 if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2226                                 < le16_to_cpu(path[depth].p_hdr->eh_max)) {
2227
2228                         /*
2229                          *  There is some space in the leaf, so there is
2230                          *  no need to account for a leaf block credit.
2231                          *
2232                          *  Bitmaps, block group descriptor blocks
2233                          *  and other metadata blocks still need to be
2234                          *  accounted for.
2235                          */
2236                         /* 1 bitmap, 1 block group descriptor */
2237                         ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
2238                         return ret;
2239                 }
2240         }
2241
2242         return ext4_chunk_trans_blocks(inode, nrblocks);
2243 }
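
/*
 * Worked example (illustrative): if the leaf along @path still has a
 * free slot, no index or leaf block has to change, so only 2 credits
 * (bitmap + group descriptor) plus EXT4_META_TRANS_BLOCKS(sb) are
 * requested; otherwise the estimate falls back to
 * ext4_chunk_trans_blocks().
 */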
2244
2245 /*
2246  * How many index/leaf blocks need to change/allocate to modify nrblocks?
2247  *
2248  * If nrblocks fit in a single extent (chunk flag is 1), then in the
2249  * worst case each tree level's index/leaf needs to be changed; if the
2250  * tree splits due to the insertion of a new extent, the old tree's
2251  * index/leaf blocks need to be updated too.
2252  *
2253  * If the nrblocks are discontiguous, they could cause the whole tree
2254  * to split more than once, but this is really rare.
2255  */
2256 int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
2257 {
2258         int index;
2259         int depth = ext_depth(inode);
2260
2261         if (chunk)
2262                 index = depth * 2;
2263         else
2264                 index = depth * 3;
2265
2266         return index;
2267 }
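
/*
 * Worked example (illustrative): for a tree of depth 2, a single
 * contiguous chunk is costed at 2 * 2 = 4 index/leaf blocks (old and
 * new path on a split), while discontiguous blocks get the more
 * pessimistic 2 * 3 = 6.
 */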
2268
2269 static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2270                               struct ext4_extent *ex,
2271                               ext4_fsblk_t *partial_cluster,
2272                               ext4_lblk_t from, ext4_lblk_t to)
2273 {
2274         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2275         unsigned short ee_len =  ext4_ext_get_actual_len(ex);
2276         ext4_fsblk_t pblk;
2277         int flags = EXT4_FREE_BLOCKS_FORGET;
2278
2279         if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2280                 flags |= EXT4_FREE_BLOCKS_METADATA;
2281         /*
2282          * For bigalloc file systems, we never free a partial cluster
2283          * at the beginning of the extent.  Instead, we make a note
2284          * that we tried freeing the cluster, and check to see if we
2285          * need to free it on a subsequent call to ext4_remove_blocks,
2286          * or at the end of the ext4_truncate() operation.
2287          */
2288         flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
2289
2290         trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
2291         /*
2292          * If we have a partial cluster, and it's different from the
2293          * cluster of the last block, we need to explicitly free the
2294          * partial cluster here.
2295          */
2296         pblk = ext4_ext_pblock(ex) + ee_len - 1;
2297         if (*partial_cluster && (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
2298                 ext4_free_blocks(handle, inode, NULL,
2299                                  EXT4_C2B(sbi, *partial_cluster),
2300                                  sbi->s_cluster_ratio, flags);
2301                 *partial_cluster = 0;
2302         }
2303
2304 #ifdef EXTENTS_STATS
2305         {
2306                 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2307                 spin_lock(&sbi->s_ext_stats_lock);
2308                 sbi->s_ext_blocks += ee_len;
2309                 sbi->s_ext_extents++;
2310                 if (ee_len < sbi->s_ext_min)
2311                         sbi->s_ext_min = ee_len;
2312                 if (ee_len > sbi->s_ext_max)
2313                         sbi->s_ext_max = ee_len;
2314                 if (ext_depth(inode) > sbi->s_depth_max)
2315                         sbi->s_depth_max = ext_depth(inode);
2316                 spin_unlock(&sbi->s_ext_stats_lock);
2317         }
2318 #endif
2319         if (from >= le32_to_cpu(ex->ee_block)
2320             && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
2321                 /* tail removal */
2322                 ext4_lblk_t num;
2323
2324                 num = le32_to_cpu(ex->ee_block) + ee_len - from;
2325                 pblk = ext4_ext_pblock(ex) + ee_len - num;
2326                 ext_debug("free last %u blocks starting %llu\n", num, pblk);
2327                 ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
2328                 /*
2329                  * If the block range to be freed didn't start at the
2330                  * beginning of a cluster, and we removed the entire
2331                  * extent, save the partial cluster here, since we
2332                  * might need to delete it if we determine that the
2333                  * truncate operation has removed all of the blocks in
2334                  * the cluster.
2335                  */
2336                 if (pblk & (sbi->s_cluster_ratio - 1) &&
2337                     (ee_len == num))
2338                         *partial_cluster = EXT4_B2C(sbi, pblk);
2339                 else
2340                         *partial_cluster = 0;
2341         } else if (from == le32_to_cpu(ex->ee_block)
2342                    && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
2343                 /* head removal */
2344                 ext4_lblk_t num;
2345                 ext4_fsblk_t start;
2346
2347                 num = to - from;
2348                 start = ext4_ext_pblock(ex);
2349
2350                 ext_debug("free first %u blocks starting %llu\n", num, start);
2351                 ext4_free_blocks(handle, inode, NULL, start, num, flags);
2352
2353         } else {
2354                 printk(KERN_INFO "strange request: removal(2) "
2355                                 "%u-%u from %u:%u\n",
2356                                 from, to, le32_to_cpu(ex->ee_block), ee_len);
2357         }
2358         return 0;
2359 }
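
/*
 * Worked example (illustrative, cluster ratio 4): removing an entire
 * 6-block extent at pblocks 98..103 frees those blocks, but since 98
 * is not cluster-aligned, cluster 24 (pblocks 96..99) may still be
 * shared with a neighbouring extent; it is therefore recorded in
 * *partial_cluster instead of being freed immediately.
 */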
2360
2362 /*
2363  * ext4_ext_rm_leaf() Removes the extents associated with the
2364  * blocks appearing between "start" and "end", and splits the extents
2365  * if "start" and "end" appear in the same extent
2366  *
2367  * @handle: The journal handle
2368  * @inode:  The file's inode
2369  * @path:   The path to the leaf
2370  * @start:  The first block to remove
2371  * @end:    The last block to remove
2372  */
2373 static int
2374 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2375                  struct ext4_ext_path *path, ext4_fsblk_t *partial_cluster,
2376                  ext4_lblk_t start, ext4_lblk_t end)
2377 {
2378         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2379         int err = 0, correct_index = 0;
2380         int depth = ext_depth(inode), credits;
2381         struct ext4_extent_header *eh;
2382         ext4_lblk_t a, b;
2383         unsigned num;
2384         ext4_lblk_t ex_ee_block;
2385         unsigned short ex_ee_len;
2386         unsigned uninitialized = 0;
2387         struct ext4_extent *ex;
2388
2389         /* the header must be checked already in ext4_ext_remove_space() */
2390         ext_debug("truncate since %u in leaf to %u\n", start, end);
2391         if (!path[depth].p_hdr)
2392                 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2393         eh = path[depth].p_hdr;
2394         if (unlikely(path[depth].p_hdr == NULL)) {
2395                 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2396                 return -EIO;
2397         }
2398         /* find where to start removing */
2399         ex = EXT_LAST_EXTENT(eh);
2400
2401         ex_ee_block = le32_to_cpu(ex->ee_block);
2402         ex_ee_len = ext4_ext_get_actual_len(ex);
2403
2404         trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
2405
2406         while (ex >= EXT_FIRST_EXTENT(eh) &&
2407                         ex_ee_block + ex_ee_len > start) {
2408
2409                 if (ext4_ext_is_uninitialized(ex))
2410                         uninitialized = 1;
2411                 else
2412                         uninitialized = 0;
2413
2414                 ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
2415                          uninitialized, ex_ee_len);
2416                 path[depth].p_ext = ex;
2417
2418                 a = ex_ee_block > start ? ex_ee_block : start;
2419                 b = ex_ee_block+ex_ee_len - 1 < end ?
2420                         ex_ee_block+ex_ee_len - 1 : end;
2421
2422                 ext_debug("  border %u:%u\n", a, b);
2423
2424                 /* If this extent is beyond the end of the hole, skip it */
2425                 if (end < ex_ee_block) {
2426                         ex--;
2427                         ex_ee_block = le32_to_cpu(ex->ee_block);
2428                         ex_ee_len = ext4_ext_get_actual_len(ex);
2429                         continue;
2430                 } else if (b != ex_ee_block + ex_ee_len - 1) {
2431                         EXT4_ERROR_INODE(inode,
2432                                          "can not handle truncate %u:%u "
2433                                          "on extent %u:%u",
2434                                          start, end, ex_ee_block,
2435                                          ex_ee_block + ex_ee_len - 1);
2436                         err = -EIO;
2437                         goto out;
2438                 } else if (a != ex_ee_block) {
2439                         /* remove tail of the extent */
2440                         num = a - ex_ee_block;
2441                 } else {
2442                         /* remove whole extent: excellent! */
2443                         num = 0;
2444                 }
2445                 /*
2446                  * 3 for leaf, sb, and inode plus 2 (bmap and group
2447                  * descriptor) for each block group; assume two block
2448                  * groups plus ex_ee_len/blocks_per_block_group for
2449                  * the worst case
2450                  */
2451                 credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2452                 if (ex == EXT_FIRST_EXTENT(eh)) {
2453                         correct_index = 1;
2454                         credits += (ext_depth(inode)) + 1;
2455                 }
2456                 credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
2457
2458                 err = ext4_ext_truncate_extend_restart(handle, inode, credits);
2459                 if (err)
2460                         goto out;
2461
2462                 err = ext4_ext_get_access(handle, inode, path + depth);
2463                 if (err)
2464                         goto out;
2465
2466                 err = ext4_remove_blocks(handle, inode, ex, partial_cluster,
2467                                          a, b);
2468                 if (err)
2469                         goto out;
2470
2471                 if (num == 0)
2472                         /* this extent is removed; mark slot entirely unused */
2473                         ext4_ext_store_pblock(ex, 0);
2474
2475                 ex->ee_len = cpu_to_le16(num);
2476                 /*
2477                  * Do not mark uninitialized if all the blocks in the
2478                  * extent have been removed.
2479                  */
2480                 if (uninitialized && num)
2481                         ext4_ext_mark_uninitialized(ex);
2482                 /*
2483                  * If the extent was completely released,
2484                  * we need to remove it from the leaf
2485                  */
2486                 if (num == 0) {
2487                         if (end != EXT_MAX_BLOCKS - 1) {
2488                                 /*
2489                                  * For hole punching, we need to scoot all the
2490                                  * extents up when an extent is removed so that
2491                                  * we don't have blank extents in the middle
2492                                  */
2493                                 memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
2494                                         sizeof(struct ext4_extent));
2495
2496                                 /* Now get rid of the one at the end */
2497                                 memset(EXT_LAST_EXTENT(eh), 0,
2498                                         sizeof(struct ext4_extent));
2499                         }
2500                         le16_add_cpu(&eh->eh_entries, -1);
2501                 } else
2502                         *partial_cluster = 0;
2503
2504                 err = ext4_ext_dirty(handle, inode, path + depth);
2505                 if (err)
2506                         goto out;
2507
2508                 ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
2509                                 ext4_ext_pblock(ex));
2510                 ex--;
2511                 ex_ee_block = le32_to_cpu(ex->ee_block);
2512                 ex_ee_len = ext4_ext_get_actual_len(ex);
2513         }
2514
2515         if (correct_index && eh->eh_entries)
2516                 err = ext4_ext_correct_indexes(handle, inode, path);
2517
2518         /*
2519          * If there is still an entry in the leaf node, check to see if
2520          * it references the partial cluster.  This is the only place
2521          * where it could; if it doesn't, we can free the cluster.
2522          */
2523         if (*partial_cluster && ex >= EXT_FIRST_EXTENT(eh) &&
2524             (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) !=
2525              *partial_cluster)) {
2526                 int flags = EXT4_FREE_BLOCKS_FORGET;
2527
2528                 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2529                         flags |= EXT4_FREE_BLOCKS_METADATA;
2530
2531                 ext4_free_blocks(handle, inode, NULL,
2532                                  EXT4_C2B(sbi, *partial_cluster),
2533                                  sbi->s_cluster_ratio, flags);
2534                 *partial_cluster = 0;
2535         }
2536
2537         /* if this leaf is free, then we should
2538          * remove it from index block above */
2539         if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2540                 err = ext4_ext_rm_idx(handle, inode, path + depth);
2541
2542 out:
2543         return err;
2544 }
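
/*
 * Worked example (illustrative): truncating from start = 150 with
 * end = EXT_MAX_BLOCKS - 1 against an extent covering 100..199 gives
 * a = 150 and b = 199 above; only the tail is removed, so the extent
 * is shrunk to num = a - ex_ee_block = 50 blocks (100..149) and blocks
 * 150..199 are freed by ext4_remove_blocks().
 */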
2545
2546 /*
2547  * ext4_ext_more_to_rm:
2548  * returns 1 if the current index has to be freed (even if only partially)
2549  */
2550 static int
2551 ext4_ext_more_to_rm(struct ext4_ext_path *path)
2552 {
2553         BUG_ON(path->p_idx == NULL);
2554
2555         if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2556                 return 0;
2557
2558         /*
2559          * if a truncate at a deeper level happened, it wasn't partial,
2560          * so we have to consider the current index for truncation
2561          */
2562         if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2563                 return 0;
2564         return 1;
2565 }
2566
2567 static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
2568                                  ext4_lblk_t end)
2569 {
2570         struct super_block *sb = inode->i_sb;
2571         int depth = ext_depth(inode);
2572         struct ext4_ext_path *path = NULL;
2573         ext4_fsblk_t partial_cluster = 0;
2574         handle_t *handle;
2575         int i = 0, err;
2576
2577         ext_debug("truncate since %u to %u\n", start, end);
2578
2579         /* the first extent we're gonna free will probably be the last in its block */
2580         handle = ext4_journal_start(inode, depth + 1);
2581         if (IS_ERR(handle))
2582                 return PTR_ERR(handle);
2583
2584 again:
2585         ext4_ext_invalidate_cache(inode);
2586
2587         trace_ext4_ext_remove_space(inode, start, depth);
2588
2589         /*
2590          * Check if we are removing extents inside the extent tree. If that
2591          * is the case, we are going to punch a hole inside the extent tree
2592          * so we have to check whether we need to split the extent covering
2593          * the last block to remove so we can easily remove the part of it
2594          * in ext4_ext_rm_leaf().
2595          */
2596         if (end < EXT_MAX_BLOCKS - 1) {
2597                 struct ext4_extent *ex;
2598                 ext4_lblk_t ee_block;
2599
2600                 /* find extent for this block */
2601                 path = ext4_ext_find_extent(inode, end, NULL);
2602                 if (IS_ERR(path)) {
2603                         ext4_journal_stop(handle);
2604                         return PTR_ERR(path);
2605                 }
2606                 depth = ext_depth(inode);
2607                 ex = path[depth].p_ext;
2608                 if (!ex) {
2609                         ext4_ext_drop_refs(path);
2610                         kfree(path);
2611                         path = NULL;
2612                         goto cont;
2613                 }
2614
2615                 ee_block = le32_to_cpu(ex->ee_block);
2616
2617                 /*
2618                  * See if the last block is inside the extent, if so split
2619                  * the extent at 'end' block so we can easily remove the
2620                  * tail of the first part of the split extent in
2621                  * ext4_ext_rm_leaf().
2622                  */
2623                 if (end >= ee_block &&
2624                     end < ee_block + ext4_ext_get_actual_len(ex) - 1) {
2625                         int split_flag = 0;
2626
2627                         if (ext4_ext_is_uninitialized(ex))
2628                                 split_flag = EXT4_EXT_MARK_UNINIT1 |
2629                                              EXT4_EXT_MARK_UNINIT2;
2630
2631                         /*
2632                          * Split the extent in two so that 'end' is the last
2633                          * block in the first new extent
2634                          */
2635                         err = ext4_split_extent_at(handle, inode, path,
2636                                                 end + 1, split_flag,
2637                                                 EXT4_GET_BLOCKS_PRE_IO |
2638                                                 EXT4_GET_BLOCKS_PUNCH_OUT_EXT);
2639
2640                         if (err < 0)
2641                                 goto out;
2642                 }
2643         }
2644 cont:
2645
2646         /*
2647          * We start scanning from right side, freeing all the blocks
2648          * after i_size and walking into the tree depth-wise.
2649          */
2650         depth = ext_depth(inode);
2651         if (path) {
2652                 int k = i = depth;
2653                 while (--k > 0)
2654                         path[k].p_block =
2655                                 le16_to_cpu(path[k].p_hdr->eh_entries)+1;
2656         } else {
2657                 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
2658                                GFP_NOFS);
2659                 if (path == NULL) {
2660                         ext4_journal_stop(handle);
2661                         return -ENOMEM;
2662                 }
2663                 path[0].p_depth = depth;
2664                 path[0].p_hdr = ext_inode_hdr(inode);
2665
2666                 if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
2667                         err = -EIO;
2668                         goto out;
2669                 }
2670         }
2671         err = 0;
2672
2673         while (i >= 0 && err == 0) {
2674                 if (i == depth) {
2675                         /* this is leaf block */
2676                         err = ext4_ext_rm_leaf(handle, inode, path,
2677                                                &partial_cluster, start,
2678                                                end);
2679                         /* root level has p_bh == NULL, brelse() eats this */
2680                         brelse(path[i].p_bh);
2681                         path[i].p_bh = NULL;
2682                         i--;
2683                         continue;
2684                 }
2685
2686                 /* this is index block */
2687                 if (!path[i].p_hdr) {
2688                         ext_debug("initialize header\n");
2689                         path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2690                 }
2691
2692                 if (!path[i].p_idx) {
2693                         /* this level hasn't been touched yet */
2694                         path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2695                         path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2696                         ext_debug("init index ptr: hdr 0x%p, num %d\n",
2697                                   path[i].p_hdr,
2698                                   le16_to_cpu(path[i].p_hdr->eh_entries));
2699                 } else {
2700                         /* we were already here, so look at the next index */
2701                         path[i].p_idx--;
2702                 }
2703
2704                 ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
2705                                 i, EXT_FIRST_INDEX(path[i].p_hdr),
2706                                 path[i].p_idx);
2707                 if (ext4_ext_more_to_rm(path + i)) {
2708                         struct buffer_head *bh;
2709                         /* go to the next level */
2710                         ext_debug("move to level %d (block %llu)\n",
2711                                   i + 1, ext4_idx_pblock(path[i].p_idx));
2712                         memset(path + i + 1, 0, sizeof(*path));
2713                         bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx));
2714                         if (!bh) {
2715                                 /* should we reset i_size? */
2716                                 err = -EIO;
2717                                 break;
2718                         }
2719                         if (WARN_ON(i + 1 > depth)) {
2720                                 err = -EIO;
2721                                 break;
2722                         }
2723                         if (ext4_ext_check_block(inode, ext_block_hdr(bh),
2724                                                         depth - i - 1, bh)) {
2725                                 err = -EIO;
2726                                 break;
2727                         }
2728                         path[i + 1].p_bh = bh;
2729
2730                         /* save actual number of indexes since this
2731                          * number is changed at the next iteration */
2732                         path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
2733                         i++;
2734                 } else {
2735                         /* we finished processing this index, go up */
2736                         if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2737                                 /* index is empty, remove it;
2738                                  * handle must already be prepared by
2739                                  * ext4_ext_rm_leaf() */
2740                                 err = ext4_ext_rm_idx(handle, inode, path + i);
2741                         }
2742                         /* root level has p_bh == NULL, brelse() eats this */
2743                         brelse(path[i].p_bh);
2744                         path[i].p_bh = NULL;
2745                         i--;
2746                         ext_debug("return to level %d\n", i);
2747                 }
2748         }
2749
2750         trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster,
2751                         path->p_hdr->eh_entries);
2752
2753         /* If we still have something in the partial cluster and we have removed
2754          * even the first extent, then we should free the blocks in the partial
2755          * cluster as well. */
2756         if (partial_cluster && path->p_hdr->eh_entries == 0) {
2757                 int flags = EXT4_FREE_BLOCKS_FORGET;
2758
2759                 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2760                         flags |= EXT4_FREE_BLOCKS_METADATA;
2761
2762                 ext4_free_blocks(handle, inode, NULL,
2763                                  EXT4_C2B(EXT4_SB(sb), partial_cluster),
2764                                  EXT4_SB(sb)->s_cluster_ratio, flags);
2765                 partial_cluster = 0;
2766         }
2767
2768         /* TODO: flexible tree reduction should be here */
2769         if (path->p_hdr->eh_entries == 0) {
2770                 /*
2771                  * truncating to zero freed the whole tree,
2772                  * so we need to correct eh_depth
2773                  */
2774                 err = ext4_ext_get_access(handle, inode, path);
2775                 if (err == 0) {
2776                         ext_inode_hdr(inode)->eh_depth = 0;
2777                         ext_inode_hdr(inode)->eh_max =
2778                                 cpu_to_le16(ext4_ext_space_root(inode, 0));
2779                         err = ext4_ext_dirty(handle, inode, path);
2780                 }
2781         }
2782 out:
2783         ext4_ext_drop_refs(path);
2784         kfree(path);
2785         if (err == -EAGAIN) {
2786                 path = NULL;
2787                 goto again;
2788         }
2789         ext4_journal_stop(handle);
2790
2791         return err;
2792 }
2793
2794 /*
2795  * called at mount time
2796  */
2797 void ext4_ext_init(struct super_block *sb)
2798 {
2799         /*
2800          * possible initialization would be here
2801          */
2802
2803         if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
2804 #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
2805                 printk(KERN_INFO "EXT4-fs: file extents enabled"
2806 #ifdef AGGRESSIVE_TEST
2807                        ", aggressive tests"
2808 #endif
2809 #ifdef CHECK_BINSEARCH
2810                        ", check binsearch"
2811 #endif
2812 #ifdef EXTENTS_STATS
2813                        ", stats"
2814 #endif
2815                        "\n");
2816 #endif
2817 #ifdef EXTENTS_STATS
2818                 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
2819                 EXT4_SB(sb)->s_ext_min = 1 << 30;
2820                 EXT4_SB(sb)->s_ext_max = 0;
2821 #endif
2822         }
2823 }
2824
2825 /*
2826  * called at umount time
2827  */
2828 void ext4_ext_release(struct super_block *sb)
2829 {
2830         if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
2831                 return;
2832
2833 #ifdef EXTENTS_STATS
2834         if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
2835                 struct ext4_sb_info *sbi = EXT4_SB(sb);
2836                 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
2837                         sbi->s_ext_blocks, sbi->s_ext_extents,
2838                         sbi->s_ext_blocks / sbi->s_ext_extents);
2839                 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
2840                         sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
2841         }
2842 #endif
2843 }
2844
2845 /* FIXME!! we need to try to merge to left or right after zero-out  */
2846 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2847 {
2848         ext4_fsblk_t ee_pblock;
2849         unsigned int ee_len;
2850         int ret;
2851
2852         ee_len    = ext4_ext_get_actual_len(ex);
2853         ee_pblock = ext4_ext_pblock(ex);
2854
2855         ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
2856         if (ret > 0)
2857                 ret = 0;
2858
2859         return ret;
2860 }
2861
2862 /*
2863  * ext4_split_extent_at() splits an extent at a given block.
2864  *
2865  * @handle: the journal handle
2866  * @inode: the file inode
2867  * @path: the path to the extent
2868  * @split: the logical block where the extent is split.
2869  * @split_flag: indicates whether the extent can be zeroed out if the split
2870  *              fails, and the states (init or uninit) of the new extents.
2871  * @flags: flags used to insert the new extent into the extent tree.
2872  *
2873  *
2874  * Splits extent [a, b] into two extents [a, @split) and [@split, b], states
2875  * of which are determined by split_flag.
2876  *
2877  * There are two cases:
2878  *  a> the extent is split into two extents.
2879  *  b> no split is needed, and the extent is just marked.
2880  *
2881  * return 0 on success.
2882  */
2883 static int ext4_split_extent_at(handle_t *handle,
2884                              struct inode *inode,
2885                              struct ext4_ext_path *path,
2886                              ext4_lblk_t split,
2887                              int split_flag,
2888                              int flags)
2889 {
2890         ext4_fsblk_t newblock;
2891         ext4_lblk_t ee_block;
2892         struct ext4_extent *ex, newex, orig_ex;
2893         struct ext4_extent *ex2 = NULL;
2894         unsigned int ee_len, depth;
2895         int err = 0;
2896
2897         ext_debug("ext4_split_extents_at: inode %lu, logical"
2898                 "block %llu\n", inode->i_ino, (unsigned long long)split);
2899
2900         ext4_ext_show_leaf(inode, path);
2901
2902         depth = ext_depth(inode);
2903         ex = path[depth].p_ext;
2904         ee_block = le32_to_cpu(ex->ee_block);
2905         ee_len = ext4_ext_get_actual_len(ex);
2906         newblock = split - ee_block + ext4_ext_pblock(ex);
2907
2908         BUG_ON(split < ee_block || split >= (ee_block + ee_len));
2909
2910         err = ext4_ext_get_access(handle, inode, path + depth);
2911         if (err)
2912                 goto out;
2913
2914         if (split == ee_block) {
2915                 /*
2916                  * case b: block @split is the block that the extent begins with
2917                  * then we just change the state of the extent, and splitting
2918                  * is not needed.
2919                  */
2920                 if (split_flag & EXT4_EXT_MARK_UNINIT2)
2921                         ext4_ext_mark_uninitialized(ex);
2922                 else
2923                         ext4_ext_mark_initialized(ex);
2924
2925                 if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
2926                         ext4_ext_try_to_merge(inode, path, ex);
2927
2928                 err = ext4_ext_dirty(handle, inode, path + depth);
2929                 goto out;
2930         }
2931
2932         /* case a */
2933         memcpy(&orig_ex, ex, sizeof(orig_ex));
2934         ex->ee_len = cpu_to_le16(split - ee_block);
2935         if (split_flag & EXT4_EXT_MARK_UNINIT1)
2936                 ext4_ext_mark_uninitialized(ex);
2937
2938         /*
2939          * path may lead to new leaf, not to original leaf any more
2940          * after ext4_ext_insert_extent() returns,
2941          */
2942         err = ext4_ext_dirty(handle, inode, path + depth);
2943         if (err)
2944                 goto fix_extent_len;
2945
2946         ex2 = &newex;
2947         ex2->ee_block = cpu_to_le32(split);
2948         ex2->ee_len   = cpu_to_le16(ee_len - (split - ee_block));
2949         ext4_ext_store_pblock(ex2, newblock);
2950         if (split_flag & EXT4_EXT_MARK_UNINIT2)
2951                 ext4_ext_mark_uninitialized(ex2);
2952
2953         err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
2954         if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
2955                 err = ext4_ext_zeroout(inode, &orig_ex);
2956                 if (err)
2957                         goto fix_extent_len;
2958                 /* update the extent length and mark as initialized */
2959                 ex->ee_len = cpu_to_le16(ee_len);
2960                 ext4_ext_try_to_merge(inode, path, ex);
2961                 err = ext4_ext_dirty(handle, inode, path + depth);
2962                 goto out;
2963         } else if (err)
2964                 goto fix_extent_len;
2965
2966 out:
2967         ext4_ext_show_leaf(inode, path);
2968         return err;
2969
2970 fix_extent_len:
2971         ex->ee_len = orig_ex.ee_len;
2972         ext4_ext_dirty(handle, inode, path + depth);
2973         return err;
2974 }
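/*
 * Editorial sketch (not part of the original source): the split arithmetic
 * used by ext4_split_extent_at(), as self-contained userspace C.  The
 * toy_* names are hypothetical and the on-disk le32/le16 handling is
 * intentionally omitted.
 */
#if 0 /* illustrative only, never compiled */
#include <assert.h>

struct toy_extent {
	unsigned long long lblk;	/* first logical block */
	unsigned long long pblk;	/* first physical block */
	unsigned int len;		/* number of blocks */
};

/* Split [lblk, lblk + len) at 'split' into ex1 = [lblk, split) and
 * ex2 = [split, lblk + len); ex2 inherits the physical mapping shifted
 * by the same distance, which is exactly how 'newblock' is derived above. */
static void toy_split(const struct toy_extent *ex, unsigned long long split,
		      struct toy_extent *ex1, struct toy_extent *ex2)
{
	assert(split > ex->lblk && split < ex->lblk + ex->len);
	ex1->lblk = ex->lblk;
	ex1->pblk = ex->pblk;
	ex1->len  = split - ex->lblk;
	ex2->lblk = split;
	ex2->pblk = ex->pblk + (split - ex->lblk);	/* cf. newblock */
	ex2->len  = ex->len - ex1->len;
}
#endif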
2975
2976 /*
2977  * ext4_split_extent() splits an extent and marks the extent covered
2978  * by @map as split_flag indicates
2979  *
2980  * It may result in splitting the extent into multiple extents (up to three)
2981  * There are three possibilities:
2982  *   a> There is no split required
2983  *   b> Splits in two extents: Split is happening at either end of the extent
2984  *   c> Splits in three extents: Someone is splitting in the middle of the extent
2985  *
2986  */
2987 static int ext4_split_extent(handle_t *handle,
2988                               struct inode *inode,
2989                               struct ext4_ext_path *path,
2990                               struct ext4_map_blocks *map,
2991                               int split_flag,
2992                               int flags)
2993 {
2994         ext4_lblk_t ee_block;
2995         struct ext4_extent *ex;
2996         unsigned int ee_len, depth;
2997         int err = 0;
2998         int uninitialized;
2999         int split_flag1, flags1;
3000
3001         depth = ext_depth(inode);
3002         ex = path[depth].p_ext;
3003         ee_block = le32_to_cpu(ex->ee_block);
3004         ee_len = ext4_ext_get_actual_len(ex);
3005         uninitialized = ext4_ext_is_uninitialized(ex);
3006
3007         if (map->m_lblk + map->m_len < ee_block + ee_len) {
3008                 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
3009                               EXT4_EXT_MAY_ZEROOUT : 0;
3010                 flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
3011                 if (uninitialized)
3012                         split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
3013                                        EXT4_EXT_MARK_UNINIT2;
3014                 err = ext4_split_extent_at(handle, inode, path,
3015                                 map->m_lblk + map->m_len, split_flag1, flags1);
3016                 if (err)
3017                         goto out;
3018         }
3019
3020         ext4_ext_drop_refs(path);
3021         path = ext4_ext_find_extent(inode, map->m_lblk, path);
3022         if (IS_ERR(path))
3023                 return PTR_ERR(path);
3024
3025         if (map->m_lblk >= ee_block) {
3026                 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
3027                               EXT4_EXT_MAY_ZEROOUT : 0;
3028                 if (uninitialized)
3029                         split_flag1 |= EXT4_EXT_MARK_UNINIT1;
3030                 if (split_flag & EXT4_EXT_MARK_UNINIT2)
3031                         split_flag1 |= EXT4_EXT_MARK_UNINIT2;
3032                 err = ext4_split_extent_at(handle, inode, path,
3033                                 map->m_lblk, split_flag1, flags);
3034                 if (err)
3035                         goto out;
3036         }
3037
3038         ext4_ext_show_leaf(inode, path);
3039 out:
3040         return err ? err : map->m_len;
3041 }
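/*
 * Editorial sketch (not part of the original source): how the two
 * ext4_split_extent_at() passes above carve at most three pieces out of
 * one extent.  Hypothetical userspace helper with made-up numbers.
 */
#if 0 /* illustrative only, never compiled */
#include <stdio.h>

static void toy_three_way(unsigned ee_block, unsigned ee_len,
			  unsigned m_lblk, unsigned m_len)
{
	unsigned ee_end = ee_block + ee_len;
	unsigned m_end = m_lblk + m_len;

	/* pass 1: split at m_lblk + m_len when the map ends early */
	if (m_end < ee_end)
		printf("tail  : [%u, %u)\n", m_end, ee_end);
	/* pass 2: split at m_lblk when the map starts late */
	if (m_lblk > ee_block)
		printf("head  : [%u, %u)\n", ee_block, m_lblk);
	printf("middle: [%u, %u)\n", m_lblk, m_end);
}

/* toy_three_way(100, 50, 110, 20) prints tail [130,150), head [100,110)
 * and middle [110,130): case c> from the comment above. */
#endif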
3042
3043 #define EXT4_EXT_ZERO_LEN 7
3044 /*
3045  * This function is called by ext4_ext_map_blocks() if someone tries to write
3046  * to an uninitialized extent. It may result in splitting the uninitialized
3047  * extent into multiple extents (up to three - one initialized and two
3048  * uninitialized).
3049  * There are three possibilities:
3050  *   a> There is no split required: Entire extent should be initialized
3051  *   b> Splits in two extents: Write is happening at either end of the extent
3052  *   c> Splits in three extents: Someone is writing in the middle of the extent
3053  *
3054  * Pre-conditions:
3055  *  - The extent pointed to by 'path' is uninitialized.
3056  *  - The extent pointed to by 'path' contains a superset
3057  *    of the logical span [map->m_lblk, map->m_lblk + map->m_len).
3058  *
3059  * Post-conditions on success:
3060  *  - the returned value is the number of blocks beyond map->m_lblk
3061  *    that are allocated and initialized.
3062  *    It is guaranteed to be >= map->m_len.
3063  */
3064 static int ext4_ext_convert_to_initialized(handle_t *handle,
3065                                            struct inode *inode,
3066                                            struct ext4_map_blocks *map,
3067                                            struct ext4_ext_path *path)
3068 {
3069         struct ext4_extent_header *eh;
3070         struct ext4_map_blocks split_map;
3071         struct ext4_extent zero_ex;
3072         struct ext4_extent *ex;
3073         ext4_lblk_t ee_block, eof_block;
3074         unsigned int ee_len, depth;
3075         int allocated;
3076         int err = 0;
3077         int split_flag = 0;
3078
3079         ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
3080                 "block %llu, max_blocks %u\n", inode->i_ino,
3081                 (unsigned long long)map->m_lblk, map->m_len);
3082
3083         eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3084                 inode->i_sb->s_blocksize_bits;
3085         if (eof_block < map->m_lblk + map->m_len)
3086                 eof_block = map->m_lblk + map->m_len;
3087
3088         depth = ext_depth(inode);
3089         eh = path[depth].p_hdr;
3090         ex = path[depth].p_ext;
3091         ee_block = le32_to_cpu(ex->ee_block);
3092         ee_len = ext4_ext_get_actual_len(ex);
3093         allocated = ee_len - (map->m_lblk - ee_block);
3094
3095         trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
3096
3097         /* Pre-conditions */
3098         BUG_ON(!ext4_ext_is_uninitialized(ex));
3099         BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
3100
3101         /*
3102          * Attempt to transfer newly initialized blocks from the currently
3103          * uninitialized extent to its left neighbor. This is much cheaper
3104          * than an insertion followed by a merge as those involve costly
3105          * memmove() calls. This is the common case in steady state for
3106          * workloads doing fallocate(FALLOC_FL_KEEP_SIZE) followed by append
3107          * writes.
3108          *
3109          * Limitations of the current logic:
3110          *  - L1: we only deal with writes at the start of the extent.
3111          *    The approach could be extended to writes at the end
3112          *    of the extent but this scenario was deemed less common.
3113          *  - L2: we do not deal with writes covering the whole extent.
3114          *    This would require removing the extent if the transfer
3115          *    is possible.
3116          *  - L3: we only attempt to merge with an extent stored in the
3117          *    same extent tree node.
3118          */
3119         if ((map->m_lblk == ee_block) &&        /*L1*/
3120                 (map->m_len < ee_len) &&        /*L2*/
3121                 (ex > EXT_FIRST_EXTENT(eh))) {  /*L3*/
3122                 struct ext4_extent *prev_ex;
3123                 ext4_lblk_t prev_lblk;
3124                 ext4_fsblk_t prev_pblk, ee_pblk;
3125                 unsigned int prev_len, write_len;
3126
3127                 prev_ex = ex - 1;
3128                 prev_lblk = le32_to_cpu(prev_ex->ee_block);
3129                 prev_len = ext4_ext_get_actual_len(prev_ex);
3130                 prev_pblk = ext4_ext_pblock(prev_ex);
3131                 ee_pblk = ext4_ext_pblock(ex);
3132                 write_len = map->m_len;
3133
3134                 /*
3135                  * A transfer of blocks from 'ex' to 'prev_ex' is allowed
3136                  * upon those conditions:
3137                  * - C1: prev_ex is initialized,
3138                  * - C2: prev_ex is logically abutting ex,
3139                  * - C3: prev_ex is physically abutting ex,
3140                  * - C4: prev_ex can receive the additional blocks without
3141                  *   overflowing the (initialized) length limit.
3142                  */
3143                 if ((!ext4_ext_is_uninitialized(prev_ex)) &&            /*C1*/
3144                         ((prev_lblk + prev_len) == ee_block) &&         /*C2*/
3145                         ((prev_pblk + prev_len) == ee_pblk) &&          /*C3*/
3146                         (prev_len < (EXT_INIT_MAX_LEN - write_len))) {  /*C4*/
3147                         err = ext4_ext_get_access(handle, inode, path + depth);
3148                         if (err)
3149                                 goto out;
3150
3151                         trace_ext4_ext_convert_to_initialized_fastpath(inode,
3152                                 map, ex, prev_ex);
3153
3154                         /* Shift the start of ex by 'write_len' blocks */
3155                         ex->ee_block = cpu_to_le32(ee_block + write_len);
3156                         ext4_ext_store_pblock(ex, ee_pblk + write_len);
3157                         ex->ee_len = cpu_to_le16(ee_len - write_len);
3158                         ext4_ext_mark_uninitialized(ex); /* Restore the flag */
3159
3160                         /* Extend prev_ex by 'write_len' blocks */
3161                         prev_ex->ee_len = cpu_to_le16(prev_len + write_len);
3162
3163                         /* Mark the block containing both extents as dirty */
3164                         ext4_ext_dirty(handle, inode, path + depth);
3165
3166                         /* Update path to point to the right extent */
3167                         path[depth].p_ext = prev_ex;
3168
3169                         /* Result: number of initialized blocks past m_lblk */
3170                         allocated = write_len;
3171                         goto out;
3172                 }
3173         }
3174
3175         WARN_ON(map->m_lblk < ee_block);
3176         /*
3177          * It is safe to convert extent to initialized via explicit
3178          * zeroout only if extent is fully inside i_size or new_size.
3179          */
3180         split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3181
3182         /* If the extent is at most 2*EXT4_EXT_ZERO_LEN blocks, zero it out directly */
3183         if (ee_len <= 2*EXT4_EXT_ZERO_LEN &&
3184             (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3185                 err = ext4_ext_zeroout(inode, ex);
3186                 if (err)
3187                         goto out;
3188
3189                 err = ext4_ext_get_access(handle, inode, path + depth);
3190                 if (err)
3191                         goto out;
3192                 ext4_ext_mark_initialized(ex);
3193                 ext4_ext_try_to_merge(inode, path, ex);
3194                 err = ext4_ext_dirty(handle, inode, path + depth);
3195                 goto out;
3196         }
3197
3198         /*
3199          * four cases:
3200          * 1. split the extent into three extents.
3201          * 2. split the extent into two extents, zeroout the first half.
3202          * 3. split the extent into two extents, zeroout the second half.
3203          * 4. split the extent into two extents without zeroout.
3204          */
3205         split_map.m_lblk = map->m_lblk;
3206         split_map.m_len = map->m_len;
3207
3208         if (allocated > map->m_len) {
3209                 if (allocated <= EXT4_EXT_ZERO_LEN &&
3210                     (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3211                         /* case 3 */
3212                         zero_ex.ee_block =
3213                                          cpu_to_le32(map->m_lblk);
3214                         zero_ex.ee_len = cpu_to_le16(allocated);
3215                         ext4_ext_store_pblock(&zero_ex,
3216                                 ext4_ext_pblock(ex) + map->m_lblk - ee_block);
3217                         err = ext4_ext_zeroout(inode, &zero_ex);
3218                         if (err)
3219                                 goto out;
3220                         split_map.m_lblk = map->m_lblk;
3221                         split_map.m_len = allocated;
3222                 } else if ((map->m_lblk - ee_block + map->m_len <
3223                            EXT4_EXT_ZERO_LEN) &&
3224                            (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3225                         /* case 2 */
3226                         if (map->m_lblk != ee_block) {
3227                                 zero_ex.ee_block = ex->ee_block;
3228                                 zero_ex.ee_len = cpu_to_le16(map->m_lblk -
3229                                                         ee_block);
3230                                 ext4_ext_store_pblock(&zero_ex,
3231                                                       ext4_ext_pblock(ex));
3232                                 err = ext4_ext_zeroout(inode, &zero_ex);
3233                                 if (err)
3234                                         goto out;
3235                         }
3236
3237                         split_map.m_lblk = ee_block;
3238                         split_map.m_len = map->m_lblk - ee_block + map->m_len;
3239                         allocated = map->m_len;
3240                 }
3241         }
3242
3243         allocated = ext4_split_extent(handle, inode, path,
3244                                        &split_map, split_flag, 0);
3245         if (allocated < 0)
3246                 err = allocated;
3247
3248 out:
3249         return err ? err : allocated;
3250 }
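/*
 * Editorial sketch (not part of the original source): the C1-C4
 * transfer-to-left-neighbor test above as a standalone predicate.  The
 * toy_* names are hypothetical; TOY_INIT_MAX_LEN mirrors the kernel's
 * EXT_INIT_MAX_LEN limit of 32768 blocks for an initialized extent.
 */
#if 0 /* illustrative only, never compiled */
#define TOY_INIT_MAX_LEN 32768

struct toy_extent {
	unsigned lblk, len;		/* logical start and length */
	unsigned long long pblk;	/* physical start */
	int uninit;			/* uninitialized flag */
};

static int toy_can_transfer(const struct toy_extent *prev,
			    const struct toy_extent *ex, unsigned write_len)
{
	return !prev->uninit &&				/* C1 */
	       prev->lblk + prev->len == ex->lblk &&	/* C2 */
	       prev->pblk + prev->len == ex->pblk &&	/* C3 */
	       prev->len < TOY_INIT_MAX_LEN - write_len;/* C4 */
}
#endif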
3251
3252 /*
3253  * This function is called by ext4_ext_map_blocks() from
3254  * ext4_get_blocks_dio_write() when DIO writes
3255  * to an uninitialized extent.
3256  *
3257  * Writing to an uninitialized extent may result in splitting the uninitialized
3258  * extent into multiple initialized/uninitialized extents (up to three)
3259  * There are three possibilities:
3260  *   a> There is no split required: Entire extent should be uninitialized
3261  *   b> Splits in two extents: Write is happening at either end of the extent
3262  *   c> Splits in three extents: Someone is writing in the middle of the extent
3263  *
3264  * One or more index blocks may be needed if the extent tree grows after
3265  * the uninitialized extent is split. To prevent ENOSPC from occurring at IO
3266  * completion, we need to split the uninitialized extent before the DIO
3267  * is submitted. The uninitialized extent handled here will be split
3268  * into (at most) three uninitialized extents. After IO completion, the part
3269  * being filled will be converted to initialized by the end_io callback
3270  * via ext4_convert_unwritten_extents().
3271  *
3272  * Returns the size of uninitialized extent to be written on success.
3273  */
3274 static int ext4_split_unwritten_extents(handle_t *handle,
3275                                         struct inode *inode,
3276                                         struct ext4_map_blocks *map,
3277                                         struct ext4_ext_path *path,
3278                                         int flags)
3279 {
3280         ext4_lblk_t eof_block;
3281         ext4_lblk_t ee_block;
3282         struct ext4_extent *ex;
3283         unsigned int ee_len;
3284         int split_flag = 0, depth;
3285
3286         ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
3287                 "block %llu, max_blocks %u\n", inode->i_ino,
3288                 (unsigned long long)map->m_lblk, map->m_len);
3289
3290         eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3291                 inode->i_sb->s_blocksize_bits;
3292         if (eof_block < map->m_lblk + map->m_len)
3293                 eof_block = map->m_lblk + map->m_len;
3294         /*
3295          * It is safe to convert extent to initialized via explicit
3296          * zeroout only if extent is fully inside i_size or new_size.
3297          */
3298         depth = ext_depth(inode);
3299         ex = path[depth].p_ext;
3300         ee_block = le32_to_cpu(ex->ee_block);
3301         ee_len = ext4_ext_get_actual_len(ex);
3302
3303         split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3304         split_flag |= EXT4_EXT_MARK_UNINIT2;
3305
3306         flags |= EXT4_GET_BLOCKS_PRE_IO;
3307         return ext4_split_extent(handle, inode, path, map, split_flag, flags);
3308 }
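/*
 * Editorial sketch (not part of the original source): the eof_block
 * round-up used above, in plain C.  blkbits = log2(block size).
 */
#if 0 /* illustrative only, never compiled */
static unsigned long long toy_eof_block(unsigned long long i_size,
					unsigned blkbits)
{
	unsigned long long blocksize = 1ULL << blkbits;

	/* round i_size up to a whole block, then convert to a block count */
	return (i_size + blocksize - 1) >> blkbits;
}

/* toy_eof_block(4097, 12) == 2: a 4097-byte file spans two 4 KiB blocks. */
#endif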
3309
3310 static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3311                                               struct inode *inode,
3312                                               struct ext4_ext_path *path)
3313 {
3314         struct ext4_extent *ex;
3315         int depth;
3316         int err = 0;
3317
3318         depth = ext_depth(inode);
3319         ex = path[depth].p_ext;
3320
3321         ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
3322                 "block %llu, max_blocks %u\n", inode->i_ino,
3323                 (unsigned long long)le32_to_cpu(ex->ee_block),
3324                 ext4_ext_get_actual_len(ex));
3325
3326         err = ext4_ext_get_access(handle, inode, path + depth);
3327         if (err)
3328                 goto out;
3329         /* first mark the extent as initialized */
3330         ext4_ext_mark_initialized(ex);
3331
3332         /* note: ext4_ext_correct_indexes() isn't needed here because
3333          * borders are not changed
3334          */
3335         ext4_ext_try_to_merge(inode, path, ex);
3336
3337         /* Mark modified extent as dirty */
3338         err = ext4_ext_dirty(handle, inode, path + depth);
3339 out:
3340         ext4_ext_show_leaf(inode, path);
3341         return err;
3342 }
3343
3344 static void unmap_underlying_metadata_blocks(struct block_device *bdev,
3345                         sector_t block, int count)
3346 {
3347         int i;
3348         for (i = 0; i < count; i++)
3349                 unmap_underlying_metadata(bdev, block + i);
3350 }
3351
3352 /*
3353  * Handle EOFBLOCKS_FL flag, clearing it if necessary
3354  */
3355 static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
3356                               ext4_lblk_t lblk,
3357                               struct ext4_ext_path *path,
3358                               unsigned int len)
3359 {
3360         int i, depth;
3361         struct ext4_extent_header *eh;
3362         struct ext4_extent *last_ex;
3363
3364         if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
3365                 return 0;
3366
3367         depth = ext_depth(inode);
3368         eh = path[depth].p_hdr;
3369
3370         /*
3371          * We're going to remove EOFBLOCKS_FL entirely in the future, so we
3372          * do not care about this case anymore. Simply remove the flag
3373          * if there are no extents.
3374          */
3375         if (unlikely(!eh->eh_entries))
3376                 goto out;
3377         last_ex = EXT_LAST_EXTENT(eh);
3378         /*
3379          * We should clear the EOFBLOCKS_FL flag if we are writing the
3380          * last block in the last extent in the file.  We test this by
3381          * first checking to see if the caller to
3382          * ext4_ext_get_blocks() was interested in the last block (or
3383          * a block beyond the last block) in the current extent.  If
3384          * this turns out to be false, we can bail out from this
3385          * function immediately.
3386          */
3387         if (lblk + len < le32_to_cpu(last_ex->ee_block) +
3388             ext4_ext_get_actual_len(last_ex))
3389                 return 0;
3390         /*
3391          * If the caller does appear to be planning to write at or
3392          * beyond the end of the current extent, we then test to see
3393          * if the current extent is the last extent in the file, by
3394          * checking to make sure it was reached via the rightmost node
3395          * at each level of the tree.
3396          */
3397         for (i = depth-1; i >= 0; i--)
3398                 if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
3399                         return 0;
3400 out:
3401         ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3402         return ext4_mark_inode_dirty(handle, inode);
3403 }
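/*
 * Editorial sketch (not part of the original source): the "reached via the
 * rightmost node at every level?" walk above, with a hypothetical
 * fixed-shape path instead of struct ext4_ext_path.
 */
#if 0 /* illustrative only, never compiled */
struct toy_level {
	int idx;	/* index entry we descended through */
	int last_idx;	/* rightmost index entry at this level */
};

static int toy_is_rightmost(const struct toy_level *path, int depth)
{
	int i;

	for (i = depth - 1; i >= 0; i--)
		if (path[i].idx != path[i].last_idx)
			return 0;	/* something lies to the right */
	return 1;			/* this really is the file's tail */
}
#endif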
3404
3405 /**
3406  * ext4_find_delalloc_range: find delayed allocated block in the given range.
3407  *
3408  * Goes through the buffer heads in the range [lblk_start, lblk_end] and returns
3409  * whether there are any buffers marked for delayed allocation. It returns '1'
3410  * on the first delalloc'ed buffer head found. If no buffer head in the given
3411  * range is marked for delalloc, it returns 0.
3412  * lblk_start should always be <= lblk_end.
3413  * search_hint_reverse is to indicate that searching in reverse from lblk_end to
3414  * lblk_start might be more efficient (i.e., we will likely hit the delalloc'ed
3415  * block sooner). This is useful when blocks are truncated sequentially from
3416  * lblk_start towards lblk_end.
3417  */
3418 static int ext4_find_delalloc_range(struct inode *inode,
3419                                     ext4_lblk_t lblk_start,
3420                                     ext4_lblk_t lblk_end,
3421                                     int search_hint_reverse)
3422 {
3423         struct address_space *mapping = inode->i_mapping;
3424         struct buffer_head *head, *bh = NULL;
3425         struct page *page;
3426         ext4_lblk_t i, pg_lblk;
3427         pgoff_t index;
3428
3429         if (!test_opt(inode->i_sb, DELALLOC))
3430                 return 0;
3431
3432         /* reverse search won't work if fs block size is less than page size */
3433         if (inode->i_blkbits < PAGE_CACHE_SHIFT)
3434                 search_hint_reverse = 0;
3435
3436         if (search_hint_reverse)
3437                 i = lblk_end;
3438         else
3439                 i = lblk_start;
3440
3441         index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
3442
3443         while ((i >= lblk_start) && (i <= lblk_end)) {
3444                 page = find_get_page(mapping, index);
3445                 if (!page)
3446                         goto nextpage;
3447
3448                 if (!page_has_buffers(page))
3449                         goto nextpage;
3450
3451                 head = page_buffers(page);
3452                 if (!head)
3453                         goto nextpage;
3454
3455                 bh = head;
3456                 pg_lblk = index << (PAGE_CACHE_SHIFT -
3457                                                 inode->i_blkbits);
3458                 do {
3459                         if (unlikely(pg_lblk < lblk_start)) {
3460                                 /*
3461                                  * This is possible when fs block size is less
3462                                  * than page size and our cluster starts/ends in the
3463                                  * middle of the page. So we need to skip the
3464                                  * initial few blocks until we reach lblk_start
3465                                  */
3466                                 pg_lblk++;
3467                                 continue;
3468                         }
3469
3470                         /* Check if the buffer is delayed allocated and that it
3471                          * is not yet mapped. (when da-buffers are mapped during
3472                          * their writeout, their da_mapped bit is set.)
3473                          */
3474                         if (buffer_delay(bh) && !buffer_da_mapped(bh)) {
3475                                 page_cache_release(page);
3476                                 trace_ext4_find_delalloc_range(inode,
3477                                                 lblk_start, lblk_end,
3478                                                 search_hint_reverse,
3479                                                 1, i);
3480                                 return 1;
3481                         }
3482                         if (search_hint_reverse)
3483                                 i--;
3484                         else
3485                                 i++;
3486                 } while ((i >= lblk_start) && (i <= lblk_end) &&
3487                                 ((bh = bh->b_this_page) != head));
3488 nextpage:
3489                 if (page)
3490                         page_cache_release(page);
3491                 /*
3492                  * Move to next page. 'i' will be the first lblk in the next
3493                  * page.
3494                  */
3495                 if (search_hint_reverse)
3496                         index--;
3497                 else
3498                         index++;
3499                 i = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
3500         }
3501
3502         trace_ext4_find_delalloc_range(inode, lblk_start, lblk_end,
3503                                         search_hint_reverse, 0, 0);
3504         return 0;
3505 }
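/*
 * Editorial sketch (not part of the original source): the block <-> page
 * index shifts used above.  With 4 KiB pages (page_shift = 12) and 1 KiB
 * blocks (blkbits = 10) there are four blocks per page.
 */
#if 0 /* illustrative only, never compiled */
static unsigned long toy_lblk_to_page(unsigned long lblk,
				      unsigned page_shift, unsigned blkbits)
{
	return lblk >> (page_shift - blkbits);	/* block 13 -> page 3 */
}

static unsigned long toy_page_first_lblk(unsigned long index,
					 unsigned page_shift, unsigned blkbits)
{
	return index << (page_shift - blkbits);	/* page 3 -> block 12 */
}
#endif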
3506
3507 int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk,
3508                                int search_hint_reverse)
3509 {
3510         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3511         ext4_lblk_t lblk_start, lblk_end;
3512         lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
3513         lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
3514
3515         return ext4_find_delalloc_range(inode, lblk_start, lblk_end,
3516                                         search_hint_reverse);
3517 }
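/*
 * Editorial sketch (not part of the original source): the cluster-boundary
 * math above.  s_cluster_ratio is a power of two, so masking with
 * ~(ratio - 1) rounds down to the first block of the containing cluster.
 */
#if 0 /* illustrative only, never compiled */
static void toy_cluster_bounds(unsigned lblk, unsigned ratio,
			       unsigned *start, unsigned *end)
{
	*start = lblk & ~(ratio - 1);	/* first block of the cluster */
	*end = *start + ratio - 1;	/* last block of the cluster */
}

/* ratio 16, lblk 37 -> cluster [32, 47] */
#endif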
3518
3519 /**
3520  * Determines how many complete clusters (out of those specified by the 'map')
3521  * are under delalloc and were reserved quota for.
3522  * This function is called when we are writing out the blocks that were
3523  * originally written with their allocation delayed, but then the space was
3524  * allocated using fallocate() before the delayed allocation could be resolved.
3525  * The cases to look for are:
3526  * ('=' indicates delayed allocated blocks
3527  *  '-' indicates non-delayed allocated blocks)
3528  * (a) partial clusters towards beginning and/or end outside of allocated range
3529  *     are not delalloc'ed.
3530  *      Ex:
3531  *      |----c---=|====c====|====c====|===-c----|
3532  *               |++++++ allocated ++++++|
3533  *      ==> 4 complete clusters in above example
3534  *
3535  * (b) partial cluster (outside of allocated range) towards either end is
3536  *     marked for delayed allocation. In this case, we will exclude that
3537  *     cluster.
3538  *      Ex:
3539  *      |----====c========|========c========|
3540  *           |++++++ allocated ++++++|
3541  *      ==> 1 complete cluster in above example
3542  *
3543  *      Ex:
3544  *      |================c================|
3545  *            |++++++ allocated ++++++|
3546  *      ==> 0 complete clusters in above example
3547  *
3548  * The ext4_da_update_reserve_space will be called only if we
3549  * determine here that there were some "entire" clusters that span
3550  * this 'allocated' range.
3551  * In the non-bigalloc case, this function will just end up returning num_blks
3552  * without ever calling ext4_find_delalloc_range.
3553  */
3554 static unsigned int
3555 get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
3556                            unsigned int num_blks)
3557 {
3558         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3559         ext4_lblk_t alloc_cluster_start, alloc_cluster_end;
3560         ext4_lblk_t lblk_from, lblk_to, c_offset;
3561         unsigned int allocated_clusters = 0;
3562
3563         alloc_cluster_start = EXT4_B2C(sbi, lblk_start);
3564         alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1);
3565
3566         /* max possible clusters for this allocation */
3567         allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;
3568
3569         trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
3570
3571         /* Check towards left side */
3572         c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
3573         if (c_offset) {
3574                 lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
3575                 lblk_to = lblk_from + c_offset - 1;
3576
3577                 if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
3578                         allocated_clusters--;
3579         }
3580
3581         /* Now check towards right. */
3582         c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1);
3583         if (allocated_clusters && c_offset) {
3584                 lblk_from = lblk_start + num_blks;
3585                 lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
3586
3587                 if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
3588                         allocated_clusters--;
3589         }
3590
3591         return allocated_clusters;
3592 }
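/*
 * Editorial sketch (not part of the original source): the "max possible
 * clusters" arithmetic above with concrete numbers, assuming
 * s_cluster_ratio = 8 so a block-to-cluster conversion is a shift by 3.
 * Blocks [10, 29] touch clusters 1..3, so at most 3 clusters; the code
 * above then drops a partial edge cluster whose outside portion is
 * delalloc'ed.
 */
#if 0 /* illustrative only, never compiled */
static unsigned toy_max_clusters(unsigned lblk_start, unsigned num_blks,
				 unsigned ratio_bits)
{
	unsigned first = lblk_start >> ratio_bits;		/* EXT4_B2C() */
	unsigned last = (lblk_start + num_blks - 1) >> ratio_bits;

	return last - first + 1;	/* toy_max_clusters(10, 20, 3) == 3 */
}
#endif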
3593
3594 static int
3595 ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3596                         struct ext4_map_blocks *map,
3597                         struct ext4_ext_path *path, int flags,
3598                         unsigned int allocated, ext4_fsblk_t newblock)
3599 {
3600         int ret = 0;
3601         int err = 0;
3602         ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
3603
3604         ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical "
3605                   "block %llu, max_blocks %u, flags %x, allocated %u\n",
3606                   inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
3607                   flags, allocated);
3608         ext4_ext_show_leaf(inode, path);
3609
3610         trace_ext4_ext_handle_uninitialized_extents(inode, map, allocated,
3611                                                     newblock);
3612
3613         /* get_block() before submit the IO, split the extent */
3614         if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
3615                 ret = ext4_split_unwritten_extents(handle, inode, map,
3616                                                    path, flags);
3617                 /*
3618                  * Flag the inode (non-AIO case) or end_io struct (AIO case)
3619                  * that this IO needs conversion to written when the IO is
3620                  * completed
3621                  */
3622                 if (io)
3623                         ext4_set_io_unwritten_flag(inode, io);
3624                 else
3625                         ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3626                 if (ext4_should_dioread_nolock(inode))
3627                         map->m_flags |= EXT4_MAP_UNINIT;
3628                 goto out;
3629         }
3630         /* IO end_io complete, convert the filled extent to written */
3631         if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
3632                 ret = ext4_convert_unwritten_extents_endio(handle, inode,
3633                                                         path);
3634                 if (ret >= 0) {
3635                         ext4_update_inode_fsync_trans(handle, inode, 1);
3636                         err = check_eofblocks_fl(handle, inode, map->m_lblk,
3637                                                  path, map->m_len);
3638                 } else
3639                         err = ret;
3640                 goto out2;
3641         }
3642         /* buffered IO case */
3643         /*
3644          * repeat fallocate creation request
3645          * we already have an unwritten extent
3646          */
3647         if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
3648                 goto map_out;
3649
3650         /* buffered READ or buffered write_begin() lookup */
3651         if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3652                 /*
3653                  * We have blocks reserved already.  We
3654                  * return allocated blocks so that delalloc
3655                  * won't do block reservation for us.  But
3656                  * the buffer head will be unmapped so that
3657                  * a read from the block returns 0s.
3658                  */
3659                 map->m_flags |= EXT4_MAP_UNWRITTEN;
3660                 goto out1;
3661         }
3662
3663         /* buffered write, writepage time, convert */
3664         ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
3665         if (ret >= 0)
3666                 ext4_update_inode_fsync_trans(handle, inode, 1);
3667 out:
3668         if (ret <= 0) {
3669                 err = ret;
3670                 goto out2;
3671         } else
3672                 allocated = ret;
3673         map->m_flags |= EXT4_MAP_NEW;
3674         /*
3675          * if we allocated more blocks than requested
3676          * we need to make sure we unmap the extra blocks
3677          * allocated. The actual needed block will get
3678          * unmapped later when we find the buffer_head marked
3679          * new.
3680          */
3681         if (allocated > map->m_len) {
3682                 unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
3683                                         newblock + map->m_len,
3684                                         allocated - map->m_len);
3685                 allocated = map->m_len;
3686         }
3687
3688         /*
3689          * If we have done fallocate with the offset that is already
3690          * delayed allocated, we would have block reservation
3691          * and quota reservation done in the delayed write path.
3692          * But fallocate would have already updated quota and block
3693          * count for this offset. So cancel these reservations
3694          */
3695         if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
3696                 unsigned int reserved_clusters;
3697                 reserved_clusters = get_reserved_cluster_alloc(inode,
3698                                 map->m_lblk, map->m_len);
3699                 if (reserved_clusters)
3700                         ext4_da_update_reserve_space(inode,
3701                                                      reserved_clusters,
3702                                                      0);
3703         }
3704
3705 map_out:
3706         map->m_flags |= EXT4_MAP_MAPPED;
3707         if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
3708                 err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
3709                                          map->m_len);
3710                 if (err < 0)
3711                         goto out2;
3712         }
3713 out1:
3714         if (allocated > map->m_len)
3715                 allocated = map->m_len;
3716         ext4_ext_show_leaf(inode, path);
3717         map->m_pblk = newblock;
3718         map->m_len = allocated;
3719 out2:
3720         if (path) {
3721                 ext4_ext_drop_refs(path);
3722                 kfree(path);
3723         }
3724         return err ? err : allocated;
3725 }
3726
3727 /*
3728  * get_implied_cluster_alloc - check to see if the requested
3729  * allocation (in the map structure) overlaps with a cluster already
3730  * allocated in an extent.
3731  *      @sb     The filesystem superblock structure
3732  *      @map    The requested lblk->pblk mapping
3733  *      @ex     The extent structure which might contain an implied
3734  *                      cluster allocation
3735  *
3736  * This function is called by ext4_ext_map_blocks() after we failed to
3737  * find blocks that were already in the inode's extent tree.  Hence,
3738  * we know that the beginning of the requested region cannot overlap
3739  * the extent from the inode's extent tree.  There are three cases we
3740  * want to catch.  The first is this case:
3741  *
3742  *               |--- cluster # N--|
3743  *    |--- extent ---|  |---- requested region ---|
3744  *                      |==========|
3745  *
3746  * The second case that we need to test for is this one:
3747  *
3748  *   |--------- cluster # N ----------------|
3749  *         |--- requested region --|   |------- extent ----|
3750  *         |=======================|
3751  *
3752  * The third case is when the requested region lies between two extents
3753  * within the same cluster:
3754  *          |------------- cluster # N-------------|
3755  * |----- ex -----|                  |---- ex_right ----|
3756  *                  |------ requested region ------|
3757  *                  |================|
3758  *
3759  * In each of the above cases, we need to set map->m_pblk and
3760  * map->m_len so they correspond to the extent labelled as
3761  * "|====|" from cluster #N, since it is already in use for data in
3762  * cluster EXT4_B2C(sbi, map->m_lblk).  We will then return 1 to
3763  * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
3764  * as a new "allocated" block region.  Otherwise, we will return 0 and
3765  * ext4_ext_map_blocks() will then allocate one or more new clusters
3766  * by calling ext4_mb_new_blocks().
3767  */
3768 static int get_implied_cluster_alloc(struct super_block *sb,
3769                                      struct ext4_map_blocks *map,
3770                                      struct ext4_extent *ex,
3771                                      struct ext4_ext_path *path)
3772 {
3773         struct ext4_sb_info *sbi = EXT4_SB(sb);
3774         ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
3775         ext4_lblk_t ex_cluster_start, ex_cluster_end;
3776         ext4_lblk_t rr_cluster_start;
3777         ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
3778         ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
3779         unsigned short ee_len = ext4_ext_get_actual_len(ex);
3780
3781         /* The extent passed in that we are trying to match */
3782         ex_cluster_start = EXT4_B2C(sbi, ee_block);
3783         ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
3784
3785         /* The requested region passed into ext4_map_blocks() */
3786         rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
3787
3788         if ((rr_cluster_start == ex_cluster_end) ||
3789             (rr_cluster_start == ex_cluster_start)) {
3790                 if (rr_cluster_start == ex_cluster_end)
3791                         ee_start += ee_len - 1;
3792                 map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) +
3793                         c_offset;
3794                 map->m_len = min(map->m_len,
3795                                  (unsigned) sbi->s_cluster_ratio - c_offset);
3796                 /*
3797                  * Check for and handle this case:
3798                  *
3799                  *   |--------- cluster # N-------------|
3800                  *                     |------- extent ----|
3801                  *         |--- requested region ---|
3802                  *         |===========|
3803                  */
3804
3805                 if (map->m_lblk < ee_block)
3806                         map->m_len = min(map->m_len, ee_block - map->m_lblk);
3807
3808                 /*
3809                  * Check for the case where there is already another allocated
3810                  * block to the right of 'ex' but before the end of the cluster.
3811                  *
3812                  *          |------------- cluster # N-------------|
3813                  * |----- ex -----|                  |---- ex_right ----|
3814                  *                  |------ requested region ------|
3815                  *                  |================|
3816                  */
3817                 if (map->m_lblk > ee_block) {
3818                         ext4_lblk_t next = ext4_ext_next_allocated_block(path);
3819                         map->m_len = min(map->m_len, next - map->m_lblk);
3820                 }
3821
3822                 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
3823                 return 1;
3824         }
3825
3826         trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
3827         return 0;
3828 }
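/*
 * Editorial sketch (not part of the original source): the first case from
 * the comment above with concrete numbers.  ratio = 16; the extent's last
 * block sits at physical block 100 (cluster 96..111) and the requested
 * region starts in that same cluster at logical block 37.
 */
#if 0 /* illustrative only, never compiled */
static void toy_implied(unsigned ratio, unsigned long long ee_last_pblk,
			unsigned m_lblk, unsigned long long *pblk,
			unsigned *maxlen)
{
	unsigned c_offset = m_lblk & (ratio - 1);	/* 37 & 15 == 5 */

	/* start of the physical cluster, plus the request's in-cluster
	 * offset: (100 & ~15) + 5 == 101 */
	*pblk = (ee_last_pblk & ~(unsigned long long)(ratio - 1)) + c_offset;
	/* we can map at most to the end of this cluster: 16 - 5 == 11 */
	*maxlen = ratio - c_offset;
}
#endif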
3829
3830
3831 /*
3832  * Block allocation/map/preallocation routine for extents based files
3833  *
3834  *
3835  * Need to be called with
3836  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
3837  * (i.e., create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
3838  *
3839  * return > 0, number of blocks already mapped/allocated
3840  *          if create == 0 and these are pre-allocated blocks
3841  *              buffer head is unmapped
3842  *          otherwise blocks are mapped
3843  *
3844  * return = 0, if plain look up failed (blocks have not been allocated)
3845  *          buffer head is unmapped
3846  *
3847  * return < 0, error case.
3848  */
3849 int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
3850                         struct ext4_map_blocks *map, int flags)
3851 {
3852         struct ext4_ext_path *path = NULL;
3853         struct ext4_extent newex, *ex, *ex2;
3854         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3855         ext4_fsblk_t newblock = 0;
3856         int free_on_err = 0, err = 0, depth, ret;
3857         unsigned int allocated = 0, offset = 0;
3858         unsigned int allocated_clusters = 0;
3859         struct ext4_allocation_request ar;
3860         ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
3861         ext4_lblk_t cluster_offset;
3862
3863         ext_debug("blocks %u/%u requested for inode %lu\n",
3864                   map->m_lblk, map->m_len, inode->i_ino);
3865         trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
3866
3867         /* check in cache */
3868         if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
3869                 if (!newex.ee_start_lo && !newex.ee_start_hi) {
3870                         if ((sbi->s_cluster_ratio > 1) &&
3871                             ext4_find_delalloc_cluster(inode, map->m_lblk, 0))
3872                                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
3873
3874                         if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3875                                 /*
3876                                  * block isn't allocated yet and
3877                                  * user doesn't want to allocate it
3878                                  */
3879                                 goto out2;
3880                         }
3881                         /* we should allocate requested block */
3882                 } else {
3883                         /* block is already allocated */
3884                         if (sbi->s_cluster_ratio > 1)
3885                                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
3886                         newblock = map->m_lblk
3887                                    - le32_to_cpu(newex.ee_block)
3888                                    + ext4_ext_pblock(&newex);
3889                         /* number of remaining blocks in the extent */
3890                         allocated = ext4_ext_get_actual_len(&newex) -
3891                                 (map->m_lblk - le32_to_cpu(newex.ee_block));
3892                         goto out;
3893                 }
3894         }
3895
3896         /* find extent for this block */
3897         path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
3898         if (IS_ERR(path)) {
3899                 err = PTR_ERR(path);
3900                 path = NULL;
3901                 goto out2;
3902         }
3903
3904         depth = ext_depth(inode);
3905
3906         /*
3907          * consistent leaf must not be empty;
3908          * this situation is possible, though, _during_ tree modification;
3909          * this is why assert can't be put in ext4_ext_find_extent()
3910          */
3911         if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
3912                 EXT4_ERROR_INODE(inode, "bad extent address "
3913                                  "lblock: %lu, depth: %d pblock %lld",
3914                                  (unsigned long) map->m_lblk, depth,
3915                                  path[depth].p_block);
3916                 err = -EIO;
3917                 goto out2;
3918         }
3919
3920         ex = path[depth].p_ext;
3921         if (ex) {
3922                 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
3923                 ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
3924                 unsigned short ee_len;
3925
3926                 /*
3927                  * Uninitialized extents are treated as holes, except that
3928                  * we split out initialized portions during a write.
3929                  */
3930                 ee_len = ext4_ext_get_actual_len(ex);
3931
3932                 trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
3933
3934                 /* if found extent covers block, simply return it */
3935                 if (in_range(map->m_lblk, ee_block, ee_len)) {
3936                         newblock = map->m_lblk - ee_block + ee_start;
3937                         /* number of remaining blocks in the extent */
3938                         allocated = ee_len - (map->m_lblk - ee_block);
3939                         ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
3940                                   ee_block, ee_len, newblock);
3941
3942                         /*
3943                          * Do not put uninitialized extent
3944                          * in the cache
3945                          */
3946                         if (!ext4_ext_is_uninitialized(ex)) {
3947                                 ext4_ext_put_in_cache(inode, ee_block,
3948                                         ee_len, ee_start);
3949                                 goto out;
3950                         }
3951                         ret = ext4_ext_handle_uninitialized_extents(
3952                                 handle, inode, map, path, flags,
3953                                 allocated, newblock);
3954                         return ret;
3955                 }
3956         }
3957
3958         if ((sbi->s_cluster_ratio > 1) &&
3959             ext4_find_delalloc_cluster(inode, map->m_lblk, 0))
3960                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
3961
3962         /*
3963          * requested block isn't allocated yet;
3964          * we cannot create blocks if the create flag is zero
3965          */
3966         if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3967                 /*
3968                  * put just found gap into cache to speed up
3969                  * subsequent requests
3970                  */
3971                 ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
3972                 goto out2;
3973         }
3974
3975         /*
3976          * Okay, we need to do block allocation.
3977          */
3978         map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
3979         newex.ee_block = cpu_to_le32(map->m_lblk);
3980         cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
3981
3982         /*
3983          * If we are doing bigalloc, check to see if the extent returned
3984          * by ext4_ext_find_extent() implies a cluster we can use.
3985          */
3986         if (cluster_offset && ex &&
3987             get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
3988                 ar.len = allocated = map->m_len;
3989                 newblock = map->m_pblk;
3990                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
3991                 goto got_allocated_blocks;
3992         }
3993
3994         /* find neighbour allocated blocks */
3995         ar.lleft = map->m_lblk;
3996         err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
3997         if (err)
3998                 goto out2;
3999         ar.lright = map->m_lblk;
4000         ex2 = NULL;
4001         err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
4002         if (err)
4003                 goto out2;
4004
4005         /* Check if the extent after searching to the right implies a
4006          * cluster we can use. */
4007         if ((sbi->s_cluster_ratio > 1) && ex2 &&
4008             get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
4009                 ar.len = allocated = map->m_len;
4010                 newblock = map->m_pblk;
4011                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4012                 goto got_allocated_blocks;
4013         }
4014
4015         /*
4016          * See if request is beyond maximum number of blocks we can have in
4017          * a single extent. For an initialized extent this limit is
4018          * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
4019          * EXT_UNINIT_MAX_LEN.
4020          */
4021         if (map->m_len > EXT_INIT_MAX_LEN &&
4022             !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
4023                 map->m_len = EXT_INIT_MAX_LEN;
4024         else if (map->m_len > EXT_UNINIT_MAX_LEN &&
4025                  (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
4026                 map->m_len = EXT_UNINIT_MAX_LEN;
4027
4028         /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
4029         newex.ee_len = cpu_to_le16(map->m_len);
4030         err = ext4_ext_check_overlap(sbi, inode, &newex, path);
4031         if (err)
4032                 allocated = ext4_ext_get_actual_len(&newex);
4033         else
4034                 allocated = map->m_len;
4035
4036         /* allocate new block */
4037         ar.inode = inode;
4038         ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
4039         ar.logical = map->m_lblk;
4040         /*
4041          * We calculate the offset from the beginning of the cluster
4042          * for the logical block number, since when we allocate a
4043          * physical cluster, the physical block should start at the
4044          * same offset from the beginning of the cluster.  This is
4045          * needed so that future calls to get_implied_cluster_alloc()
4046          * work correctly.
4047          */
4048         offset = map->m_lblk & (sbi->s_cluster_ratio - 1);
4049         ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
4050         ar.goal -= offset;
4051         ar.logical -= offset;
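        /*
         * Editorial example (not in the original source): with
         * s_cluster_ratio = 16 and map->m_lblk = 35, offset = 35 & 15 = 3;
         * ar.goal and ar.logical are pulled back 3 blocks to the cluster
         * boundary, and ar.len covers offset + allocated blocks rounded up
         * to whole clusters by EXT4_NUM_B2C().
         */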
4052         if (S_ISREG(inode->i_mode))
4053                 ar.flags = EXT4_MB_HINT_DATA;
4054         else
4055                 /* disable in-core preallocation for non-regular files */
4056                 ar.flags = 0;
4057         if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
4058                 ar.flags |= EXT4_MB_HINT_NOPREALLOC;
4059         newblock = ext4_mb_new_blocks(handle, &ar, &err);
4060         if (!newblock)
4061                 goto out2;
4062         ext_debug("allocate new block: goal %llu, found %llu/%u\n",
4063                   ar.goal, newblock, allocated);
4064         free_on_err = 1;
4065         allocated_clusters = ar.len;
4066         ar.len = EXT4_C2B(sbi, ar.len) - offset;
4067         if (ar.len > allocated)
4068                 ar.len = allocated;
4069
4070 got_allocated_blocks:
4071         /* try to insert new extent into found leaf and return */
4072         ext4_ext_store_pblock(&newex, newblock + offset);
4073         newex.ee_len = cpu_to_le16(ar.len);
4074         /* Mark uninitialized */
4075         if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
4076                 ext4_ext_mark_uninitialized(&newex);
4077                 /*
4078                  * An io_end structure is created for every IO write to an
4079                  * uninitialized extent. To avoid unnecessary conversion,
4080                  * here we flag the IO that really needs the conversion.
4081                  * For the non-async direct IO case, flag the inode state
4082                  * that we need to perform conversion when IO is done.
4083                  */
4084                 if (flags & EXT4_GET_BLOCKS_PRE_IO) {
4085                         if (io)
4086                                 ext4_set_io_unwritten_flag(inode, io);
4087                         else
4088                                 ext4_set_inode_state(inode,
4089                                                      EXT4_STATE_DIO_UNWRITTEN);
4090                 }
4091                 if (ext4_should_dioread_nolock(inode))
4092                         map->m_flags |= EXT4_MAP_UNINIT;
4093         }
4094
4095         err = 0;
4096         if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
4097                 err = check_eofblocks_fl(handle, inode, map->m_lblk,
4098                                          path, ar.len);
4099         if (!err)
4100                 err = ext4_ext_insert_extent(handle, inode, path,
4101                                              &newex, flags);
4102         if (err && free_on_err) {
4103                 int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
4104                         EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
4105                 /* free data blocks we just allocated */
4106                 /* not a good idea to call discard here directly,
4107                  * but otherwise we'd need to call it every free() */
4108                 ext4_discard_preallocations(inode);
4109                 ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex),
4110                                  ext4_ext_get_actual_len(&newex), fb_flags);
4111                 goto out2;
4112         }
4113
4114         /* previous routine could use block we allocated */
4115         newblock = ext4_ext_pblock(&newex);
4116         allocated = ext4_ext_get_actual_len(&newex);
4117         if (allocated > map->m_len)
4118                 allocated = map->m_len;
4119         map->m_flags |= EXT4_MAP_NEW;
4120
4121         /*
4122          * Update reserved blocks/metadata blocks after successful
4123          * block allocation which had been deferred till now.
4124          */
4125         if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
4126                 unsigned int reserved_clusters;
4127                 /*
4128                  * Check how many clusters we had reserved for this allocated range
4129                  */
4130                 reserved_clusters = get_reserved_cluster_alloc(inode,
4131                                                 map->m_lblk, allocated);
4132                 if (map->m_flags & EXT4_MAP_FROM_CLUSTER) {
4133                         if (reserved_clusters) {
4134                                 /*
4135                                  * We have clusters reserved for this range.
4136                                  * But since we are not doing actual allocation
4137                                  * and are simply using blocks from previously
4138                                  * allocated cluster, we should release the
4139                                  * reservation and not claim quota.
4140                                  */
4141                                 ext4_da_update_reserve_space(inode,
4142                                                 reserved_clusters, 0);
4143                         }
4144                 } else {
4145                         BUG_ON(allocated_clusters < reserved_clusters);
4146                         /* We will claim quota for all newly allocated blocks. */
4147                         ext4_da_update_reserve_space(inode, allocated_clusters,
4148                                                         1);
4149                         if (reserved_clusters < allocated_clusters) {
4150                                 struct ext4_inode_info *ei = EXT4_I(inode);
4151                                 int reservation = allocated_clusters -
4152                                                   reserved_clusters;
4153                                 /*
4154                                  * It seems we claimed a few clusters outside
4155                                  * the range of this allocation. We should
4156                                  * give them back to the reservation pool.
4157                                  * This can happen in the following case:
4158                                  *
4159                                  * * Suppose s_cluster_ratio is 4 (i.e., each
4160                                  *   cluster has 4 blocks). Thus, the clusters
4161                                  *   are [0-3],[4-7],[8-11]...
4162                                  * * First comes a delayed allocation write for
4163                                  *   logical blocks 10 & 11. Since there were no
4164                                  *   previous delayed allocated blocks in the
4165                                  *   range [8-11], we would reserve 1 cluster
4166                                  *   for this write.
4167                                  * * Next comes a write for logical blocks 3 to
4168                                  *   8. In this case, we will reserve 2 clusters
4169                                  *   (for [0-3] and [4-7]; and not for [8-11],
4170                                  *   as that range already has delayed allocated
4171                                  *   blocks). Total reserved clusters is now 3.
4172                                  * * Now, during the delayed allocation writeout
4173                                  *   time, we will first write blocks [3-8] and
4174                                  *   allocate 3 clusters for writing these
4175                                  *   blocks. Also, we would claim all three of
4176                                  *   those clusters.
4177                                  * * Now when we come here to write out blocks
4178                                  *   [10-11], we would expect to claim the
4179                                  *   reservation of 1 cluster we had made (and
4180                                  *   we would claim it, since there are no more
4181                                  *   delayed allocated blocks in the range
4182                                  *   [8-11]). But our reserved cluster count had
4183                                  *   already gone to 0.
4184                                  *
4185                                  *   Thus, at step 4 above, when we determine
4186                                  *   that there are still some unwritten delayed
4187                                  *   allocated blocks outside of our current
4188                                  *   block range, we should increment the
4189                                  *   reserved clusters count so that when the
4190                                  *   remaining blocks finally get written, we
4191                                  *   can claim them.
4192                                  */
4193                                 dquot_reserve_block(inode,
4194                                                 EXT4_C2B(sbi, reservation));
4195                                 spin_lock(&ei->i_block_reservation_lock);
4196                                 ei->i_reserved_data_blocks += reservation;
4197                                 spin_unlock(&ei->i_block_reservation_lock);
4198                         }
4199                 }
4200         }
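        /*
         * Recap of the above with the numbers from the long comment
         * (s_cluster_ratio = 4): the writeout of blocks [3-8] has
         * allocated_clusters = 3 but get_reserved_cluster_alloc() finds
         * only reserved_clusters = 2 in that range, so reservation =
         * 3 - 2 = 1 cluster is re-reserved for the still-delayed
         * blocks [10-11].
         */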
4201
4202         /*
4203          * Cache the extent and update transaction to commit on fdatasync only
4204          * when it is _not_ an uninitialized extent.
4205          */
4206         if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
4207                 ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
4208                 ext4_update_inode_fsync_trans(handle, inode, 1);
4209         } else
4210                 ext4_update_inode_fsync_trans(handle, inode, 0);
4211 out:
4212         if (allocated > map->m_len)
4213                 allocated = map->m_len;
4214         ext4_ext_show_leaf(inode, path);
4215         map->m_flags |= EXT4_MAP_MAPPED;
4216         map->m_pblk = newblock;
4217         map->m_len = allocated;
4218 out2:
4219         if (path) {
4220                 ext4_ext_drop_refs(path);
4221                 kfree(path);
4222         }
4223
4224         trace_ext4_ext_map_blocks_exit(inode, map->m_lblk,
4225                 newblock, map->m_len, err ? err : allocated);
4226
4227         return err ? err : allocated;
4228 }
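
/*
 * Hedged sketch, not part of the original file: roughly how callers
 * drive the mapper above. ext4_map_blocks() dispatches to
 * ext4_ext_map_blocks() for extent-based inodes; the helper name and
 * the flag choice here are illustrative assumptions only.
 */
static int __maybe_unused example_map_one_block(handle_t *handle,
						struct inode *inode,
						ext4_lblk_t lblk)
{
	struct ext4_map_blocks map = {
		.m_lblk = lblk,
		.m_len = 1,
	};

	/* a positive return value is the number of blocks mapped */
	return ext4_map_blocks(handle, inode, &map, EXT4_GET_BLOCKS_CREATE);
}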
4229
4230 void ext4_ext_truncate(struct inode *inode)
4231 {
4232         struct address_space *mapping = inode->i_mapping;
4233         struct super_block *sb = inode->i_sb;
4234         ext4_lblk_t last_block;
4235         handle_t *handle;
4236         loff_t page_len;
4237         int err = 0;
4238
4239         /*
4240          * finish any pending end_io work so we won't run the risk of
4241          * converting any truncated blocks to initialized later
4242          */
4243         ext4_flush_completed_IO(inode);
4244
4245         /*
4246          * probably the first extent we free will be the last one in the block
4247          */
4248         err = ext4_writepage_trans_blocks(inode);
4249         handle = ext4_journal_start(inode, err);
4250         if (IS_ERR(handle))
4251                 return;
4252
4253         if (inode->i_size % PAGE_CACHE_SIZE != 0) {
4254                 page_len = PAGE_CACHE_SIZE -
4255                         (inode->i_size & (PAGE_CACHE_SIZE - 1));
4256
4257                 err = ext4_discard_partial_page_buffers(handle,
4258                         mapping, inode->i_size, page_len, 0);
4259
4260                 if (err)
4261                         goto out_stop;
4262         }
4263
4264         if (ext4_orphan_add(handle, inode))
4265                 goto out_stop;
4266
4267         down_write(&EXT4_I(inode)->i_data_sem);
4268         ext4_ext_invalidate_cache(inode);
4269
4270         ext4_discard_preallocations(inode);
4271
4272         /*
4273          * TODO: optimization is possible here.
4274          * Probably we need not scan at all,
4275          * because page truncation is enough.
4276          */
4277
4278         /* we have to know where to truncate from in crash case */
4279         EXT4_I(inode)->i_disksize = inode->i_size;
4280         ext4_mark_inode_dirty(handle, inode);
4281
4282         last_block = (inode->i_size + sb->s_blocksize - 1)
4283                         >> EXT4_BLOCK_SIZE_BITS(sb);
4284         err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
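        /*
         * Worked example with illustrative numbers: with a 4096-byte
         * block size and i_size = 10000, last_block = 14095 >> 12 = 3,
         * so extents from logical block 3 onwards are removed.
         */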
4285
4286         /* In a multi-transaction truncate, we only make the final
4287          * transaction synchronous.
4288          */
4289         if (IS_SYNC(inode))
4290                 ext4_handle_sync(handle);
4291
4292         up_write(&EXT4_I(inode)->i_data_sem);
4293
4294 out_stop:
4295         /*
4296          * If this was a simple ftruncate() and the file will remain alive,
4297          * then we need to clear up the orphan record which we created above.
4298          * However, if this was a real unlink then we were called by
4299          * ext4_delete_inode(), and we allow that function to clean up the
4300          * orphan info for us.
4301          */
4302         if (inode->i_nlink)
4303                 ext4_orphan_del(handle, inode);
4304
4305         inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4306         ext4_mark_inode_dirty(handle, inode);
4307         ext4_journal_stop(handle);
4308 }
4309
4310 static void ext4_falloc_update_inode(struct inode *inode,
4311                                 int mode, loff_t new_size, int update_ctime)
4312 {
4313         struct timespec now;
4314
4315         if (update_ctime) {
4316                 now = current_fs_time(inode->i_sb);
4317                 if (!timespec_equal(&inode->i_ctime, &now))
4318                         inode->i_ctime = now;
4319         }
4320         /*
4321          * Update only when preallocation was requested beyond
4322          * the file size.
4323          */
4324         if (!(mode & FALLOC_FL_KEEP_SIZE)) {
4325                 if (new_size > i_size_read(inode))
4326                         i_size_write(inode, new_size);
4327                 if (new_size > EXT4_I(inode)->i_disksize)
4328                         ext4_update_i_disksize(inode, new_size);
4329         } else {
4330                 /*
4331                  * Mark that we allocate beyond EOF so the subsequent truncate
4332                  * can proceed even if the new size is the same as i_size.
4333                  */
4334                 if (new_size > i_size_read(inode))
4335                         ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
4336         }
4337
4338 }
4339
4340 /*
4341  * Preallocate space for a file. This implements ext4's fallocate file
4342  * operation, which gets called from the sys_fallocate system call.
4343  * For block-mapped files, posix_fallocate should fall back to the method
4344  * of writing zeroes to the required new blocks (the same behavior that
4345  * is expected of file systems that do not support fallocate()).
4346  */
4347 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4348 {
4349         struct inode *inode = file->f_path.dentry->d_inode;
4350         handle_t *handle;
4351         loff_t new_size;
4352         unsigned int max_blocks;
4353         int ret = 0;
4354         int ret2 = 0;
4355         int retries = 0;
4356         int flags;
4357         struct ext4_map_blocks map;
4358         unsigned int credits, blkbits = inode->i_blkbits;
4359
4360         /*
4361          * currently supporting (pre)allocate mode for extent-based
4362          * files _only_
4363          */
4364         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4365                 return -EOPNOTSUPP;
4366
4367         /* Return error if mode is not supported */
4368         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
4369                 return -EOPNOTSUPP;
4370
4371         if (mode & FALLOC_FL_PUNCH_HOLE)
4372                 return ext4_punch_hole(file, offset, len);
4373
4374         trace_ext4_fallocate_enter(inode, offset, len, mode);
4375         map.m_lblk = offset >> blkbits;
4376         /*
4377          * We can't just convert len to max_blocks, since the offset may
4378          * not be block aligned: e.g. blocksize = 4096, offset = 3072, len = 2048.
4379          */
4380         max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
4381                 - map.m_lblk;
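        /*
         * Worked example with illustrative numbers: blkbits = 12,
         * offset = 3072 and len = 2048 give map.m_lblk = 0 and
         * EXT4_BLOCK_ALIGN(5120, 12) >> 12 = 2, so max_blocks = 2 even
         * though len >> blkbits would be 0.
         */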
4382         /*
4383          * credits to insert 1 extent into extent tree
4384          */
4385         credits = ext4_chunk_trans_blocks(inode, max_blocks);
4386         mutex_lock(&inode->i_mutex);
4387         ret = inode_newsize_ok(inode, (len + offset));
4388         if (ret) {
4389                 mutex_unlock(&inode->i_mutex);
4390                 trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
4391                 return ret;
4392         }
4393         flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT;
4394         if (mode & FALLOC_FL_KEEP_SIZE)
4395                 flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
4396         /*
4397          * Don't normalize the request if it can fit in one extent so
4398          * that it doesn't get unnecessarily split into multiple
4399          * extents.
4400          */
4401         if (len <= EXT_UNINIT_MAX_LEN << blkbits)
4402                 flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
4403 retry:
4404         while (ret >= 0 && ret < max_blocks) {
4405                 map.m_lblk = map.m_lblk + ret;
4406                 map.m_len = max_blocks = max_blocks - ret;
4407                 handle = ext4_journal_start(inode, credits);
4408                 if (IS_ERR(handle)) {
4409                         ret = PTR_ERR(handle);
4410                         break;
4411                 }
4412                 ret = ext4_map_blocks(handle, inode, &map, flags);
4413                 if (ret <= 0) {
4414 #ifdef EXT4FS_DEBUG
4415                         WARN_ON(ret <= 0);
4416                         printk(KERN_ERR "%s: ext4_ext_map_blocks "
4417                                     "returned error: inode #%lu, block=%u, "
4418                                     "max_blocks=%u\n", __func__,
4419                                     inode->i_ino, map.m_lblk, max_blocks);
4420 #endif
4421                         ext4_mark_inode_dirty(handle, inode);
4422                         ret2 = ext4_journal_stop(handle);
4423                         break;
4424                 }
4425                 if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
4426                                                 blkbits) >> blkbits))
4427                         new_size = offset + len;
4428                 else
4429                         new_size = ((loff_t) map.m_lblk + ret) << blkbits;
4430
4431                 ext4_falloc_update_inode(inode, mode, new_size,
4432                                          (map.m_flags & EXT4_MAP_NEW));
4433                 ext4_mark_inode_dirty(handle, inode);
4434                 if ((file->f_flags & O_SYNC) && ret >= max_blocks)
4435                         ext4_handle_sync(handle);
4436                 ret2 = ext4_journal_stop(handle);
4437                 if (ret2)
4438                         break;
4439         }
4440         if (ret == -ENOSPC &&
4441                         ext4_should_retry_alloc(inode->i_sb, &retries)) {
4442                 ret = 0;
4443                 goto retry;
4444         }
4445         mutex_unlock(&inode->i_mutex);
4446         trace_ext4_fallocate_exit(inode, offset, max_blocks,
4447                                 ret > 0 ? ret2 : ret);
4448         return ret > 0 ? ret2 : ret;
4449 }
4450
4451 /*
4452  * This function converts a range of blocks to written extents.
4453  * The caller of this function will pass the start offset and the size.
4454  * All unwritten extents within this range will be converted to
4455  * written extents.
4456  *
4457  * This function is called from the direct IO end_io callback
4458  * function, to convert the fallocated extents after IO is completed.
4459  * Returns 0 on success.
4460  */
4461 int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
4462                                     ssize_t len)
4463 {
4464         handle_t *handle;
4465         unsigned int max_blocks;
4466         int ret = 0;
4467         int ret2 = 0;
4468         struct ext4_map_blocks map;
4469         unsigned int credits, blkbits = inode->i_blkbits;
4470
4471         map.m_lblk = offset >> blkbits;
4472         /*
4473          * We can't just convert len to max_blocks, since the offset may
4474          * not be block aligned: e.g. blocksize = 4096, offset = 3072, len = 2048.
4475          */
4476         max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
4477                       map.m_lblk);
4478         /*
4479          * credits to insert 1 extent into extent tree
4480          */
4481         credits = ext4_chunk_trans_blocks(inode, max_blocks);
4482         while (ret >= 0 && ret < max_blocks) {
4483                 map.m_lblk += ret;
4484                 map.m_len = (max_blocks -= ret);
4485                 handle = ext4_journal_start(inode, credits);
4486                 if (IS_ERR(handle)) {
4487                         ret = PTR_ERR(handle);
4488                         break;
4489                 }
4490                 ret = ext4_map_blocks(handle, inode, &map,
4491                                       EXT4_GET_BLOCKS_IO_CONVERT_EXT);
4492                 if (ret <= 0) {
4493                         WARN_ON(ret <= 0);
4494                         ext4_msg(inode->i_sb, KERN_ERR,
4495                                  "%s:%d: inode #%lu: block %u: len %u: "
4496                                  "ext4_ext_map_blocks returned %d",
4497                                  __func__, __LINE__, inode->i_ino, map.m_lblk,
4498                                  map.m_len, ret);
4499                 }
4500                 ext4_mark_inode_dirty(handle, inode);
4501                 ret2 = ext4_journal_stop(handle);
4502                 if (ret <= 0 || ret2)
4503                         break;
4504         }
4505         return ret > 0 ? ret2 : ret;
4506 }
4507
4508 /*
4509  * Callback function called for each extent to gather FIEMAP information.
4510  */
4511 static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next,
4512                        struct ext4_ext_cache *newex, struct ext4_extent *ex,
4513                        void *data)
4514 {
4515         __u64   logical;
4516         __u64   physical;
4517         __u64   length;
4518         __u32   flags = 0;
4519         int             ret = 0;
4520         struct fiemap_extent_info *fieinfo = data;
4521         unsigned char blksize_bits;
4522
4523         blksize_bits = inode->i_sb->s_blocksize_bits;
4524         logical = (__u64)newex->ec_block << blksize_bits;
4525
4526         if (newex->ec_start == 0) {
4527                 /*
4528                  * No extent in the extent tree contains @newex->ec_start,
4529                  * so the block may lie in 1) a hole or 2) a delayed extent.
4530                  *
4531                  * Holes and delayed extents are processed as follows:
4532                  * 1. look up dirty pages in the specified range of the
4533                  *    pagecache. If no page is found, there is no delayed
4534                  *    extent; return EXT_CONTINUE.
4535                  * 2. find the 1st mapped buffer;
4536                  * 3. check that the mapped buffer is both in the request
4537                  *    range and a delayed buffer. If not, there is no
4538                  *    delayed extent; return.
4539                  * 4. a delayed extent is found; it will be collected.
4540                  */
4541                 ext4_lblk_t     end = 0;
4542                 pgoff_t         last_offset;
4543                 pgoff_t         offset;
4544                 pgoff_t         index;
4545                 pgoff_t         start_index = 0;
4546                 struct page     **pages = NULL;
4547                 struct buffer_head *bh = NULL;
4548                 struct buffer_head *head = NULL;
4549                 unsigned int nr_pages = PAGE_SIZE / sizeof(struct page *);
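                /*
                 * Illustrative arithmetic: with 4 KiB pages and 8-byte
                 * pointers, one page holds 4096 / 8 = 512 page pointers
                 * per lookup batch.
                 */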
4550
4551                 pages = kmalloc(PAGE_SIZE, GFP_KERNEL);
4552                 if (pages == NULL)
4553                         return -ENOMEM;
4554
4555                 offset = logical >> PAGE_SHIFT;
4556 repeat:
4557                 last_offset = offset;
4558                 head = NULL;
4559                 ret = find_get_pages_tag(inode->i_mapping, &offset,
4560                                         PAGECACHE_TAG_DIRTY, nr_pages, pages);
4561
4562                 if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
4563                         /* First time, try to find a mapped buffer. */
4564                         if (ret == 0) {
4565 out:
4566                                 for (index = 0; index < ret; index++)
4567                                         page_cache_release(pages[index]);
4568                                 /* just a hole. */
4569                                 kfree(pages);
4570                                 return EXT_CONTINUE;
4571                         }
4572                         index = 0;
4573
4574 next_page:
4575                         /* Try to find the 1st mapped buffer. */
4576                         end = ((__u64)pages[index]->index << PAGE_SHIFT) >>
4577                                   blksize_bits;
4578                         if (!page_has_buffers(pages[index]))
4579                                 goto out;
4580                         head = page_buffers(pages[index]);
4581                         if (!head)
4582                                 goto out;
4583
4584                         index++;
4585                         bh = head;
4586                         do {
4587                                 if (end >= newex->ec_block +
4588                                         newex->ec_len)
4589                                         /* The buffer is out of
4590                                          * the request range.
4591                                          */
4592                                         goto out;
4593
4594                                 if (buffer_mapped(bh) &&
4595                                     end >= newex->ec_block) {
4596                                         start_index = index - 1;
4597                                         /* get the 1st mapped buffer. */
4598                                         goto found_mapped_buffer;
4599                                 }
4600
4601                                 bh = bh->b_this_page;
4602                                 end++;
4603                         } while (bh != head);
4604
4605                         /* No mapped buffer found in the range in this page;
4606                          * we need to look at the next page.
4607                          */
4608                         if (index >= ret) {
4609                                 /* There is no page left, but we need to limit
4610                                  * newex->ec_len.
4611                                  */
4612                                 newex->ec_len = end - newex->ec_block;
4613                                 goto out;
4614                         }
4615                         goto next_page;
4616                 } else {
4617                         /* Find contiguous delayed buffers. */
4618                         if (ret > 0 && pages[0]->index == last_offset)
4619                                 head = page_buffers(pages[0]);
4620                         bh = head;
4621                         index = 1;
4622                         start_index = 0;
4623                 }
4624
4625 found_mapped_buffer:
4626                 if (bh != NULL && buffer_delay(bh)) {
4627                         /* 1st or contiguous delayed buffer found. */
4628                         if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
4629                                 /*
4630                                  * 1st delayed buffer found, record
4631                                  * the start of extent.
4632                                  */
4633                                 flags |= FIEMAP_EXTENT_DELALLOC;
4634                                 newex->ec_block = end;
4635                                 logical = (__u64)end << blksize_bits;
4636                         }
4637                         /* Find contiguous delayed buffers. */
4638                         do {
4639                                 if (!buffer_delay(bh))
4640                                         goto found_delayed_extent;
4641                                 bh = bh->b_this_page;
4642                                 end++;
4643                         } while (bh != head);
4644
4645                         for (; index < ret; index++) {
4646                                 if (!page_has_buffers(pages[index])) {
4647                                         bh = NULL;
4648                                         break;
4649                                 }
4650                                 head = page_buffers(pages[index]);
4651                                 if (!head) {
4652                                         bh = NULL;
4653                                         break;
4654                                 }
4655
4656                                 if (pages[index]->index !=
4657                                     pages[start_index]->index + index
4658                                     - start_index) {
4659                                         /* Blocks are not contiguous. */
4660                                         bh = NULL;
4661                                         break;
4662                                 }
4663                                 bh = head;
4664                                 do {
4665                                         if (!buffer_delay(bh))
4666                                                 /* Delayed-extent ends. */
4667                                                 goto found_delayed_extent;
4668                                         bh = bh->b_this_page;
4669                                         end++;
4670                                 } while (bh != head);
4671                         }
4672                 } else if (!(flags & FIEMAP_EXTENT_DELALLOC))
4673                         /* a hole found. */
4674                         goto out;
4675
4676 found_delayed_extent:
4677                 newex->ec_len = min(end - newex->ec_block,
4678                                                 (ext4_lblk_t)EXT_INIT_MAX_LEN);
4679                 if (ret == nr_pages && bh != NULL &&
4680                         newex->ec_len < EXT_INIT_MAX_LEN &&
4681                         buffer_delay(bh)) {
4682                         /* Haven't collected a full extent yet; continue. */
4683                         for (index = 0; index < ret; index++)
4684                                 page_cache_release(pages[index]);
4685                         goto repeat;
4686                 }
4687
4688                 for (index = 0; index < ret; index++)
4689                         page_cache_release(pages[index]);
4690                 kfree(pages);
4691         }
4692
4693         physical = (__u64)newex->ec_start << blksize_bits;
4694         length =   (__u64)newex->ec_len << blksize_bits;
4695
4696         if (ex && ext4_ext_is_uninitialized(ex))
4697                 flags |= FIEMAP_EXTENT_UNWRITTEN;
4698
4699         if (next == EXT_MAX_BLOCKS)
4700                 flags |= FIEMAP_EXTENT_LAST;
4701
4702         ret = fiemap_fill_next_extent(fieinfo, logical, physical,
4703                                         length, flags);
4704         if (ret < 0)
4705                 return ret;
4706         if (ret == 1)
4707                 return EXT_BREAK;
4708         return EXT_CONTINUE;
4709 }
4710 /* fiemap flags we can handle are specified here */
4711 #define EXT4_FIEMAP_FLAGS       (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
4712
4713 static int ext4_xattr_fiemap(struct inode *inode,
4714                                 struct fiemap_extent_info *fieinfo)
4715 {
4716         __u64 physical = 0;
4717         __u64 length;
4718         __u32 flags = FIEMAP_EXTENT_LAST;
4719         int blockbits = inode->i_sb->s_blocksize_bits;
4720         int error = 0;
4721
4722         /* in-inode? */
4723         if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
4724                 struct ext4_iloc iloc;
4725                 int offset;     /* offset of xattr in inode */
4726
4727                 error = ext4_get_inode_loc(inode, &iloc);
4728                 if (error)
4729                         return error;
4730                 physical = iloc.bh->b_blocknr << blockbits;
4731                 offset = EXT4_GOOD_OLD_INODE_SIZE +
4732                                 EXT4_I(inode)->i_extra_isize;
4733                 physical += offset;
4734                 length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
4735                 flags |= FIEMAP_EXTENT_DATA_INLINE;
4736                 brelse(iloc.bh);
4737         } else { /* external block */
4738                 physical = EXT4_I(inode)->i_file_acl << blockbits;
4739                 length = inode->i_sb->s_blocksize;
4740         }
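        /*
         * Worked example with typical (not guaranteed) values: for a
         * 256-byte inode with i_extra_isize = 32, offset =
         * EXT4_GOOD_OLD_INODE_SIZE + 32 = 160, so the in-inode xattr
         * region reported above is 256 - 160 = 96 bytes long.
         */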
4741
4742         if (physical)
4743                 error = fiemap_fill_next_extent(fieinfo, 0, physical,
4744                                                 length, flags);
4745         return (error < 0 ? error : 0);
4746 }
4747
4748 /*
4749  * ext4_ext_punch_hole
4750  *
4751  * Punches a hole of "length" bytes in a file starting
4752  * at byte "offset"
4753  *
4754  * @inode:  The inode of the file to punch a hole in
4755  * @offset: The starting byte offset of the hole
4756  * @length: The length of the hole
4757  *
4758  * Returns 0 on success, or a negative error code on failure
4759  */
4760 int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
4761 {
4762         struct inode *inode = file->f_path.dentry->d_inode;
4763         struct super_block *sb = inode->i_sb;
4764         ext4_lblk_t first_block, stop_block;
4765         struct address_space *mapping = inode->i_mapping;
4766         handle_t *handle;
4767         loff_t first_page, last_page, page_len;
4768         loff_t first_page_offset, last_page_offset;
4769         int credits, err = 0;
4770
4771         /* No need to punch hole beyond i_size */
4772         if (offset >= inode->i_size)
4773                 return 0;
4774
4775         /*
4776          * If the hole extends beyond i_size, set the hole
4777          * to end after the page that contains i_size
4778          */
4779         if (offset + length > inode->i_size) {
4780                 length = inode->i_size +
4781                    PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
4782                    offset;
4783         }
4784
4785         first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
4786         last_page = (offset + length) >> PAGE_CACHE_SHIFT;
4787
4788         first_page_offset = first_page << PAGE_CACHE_SHIFT;
4789         last_page_offset = last_page << PAGE_CACHE_SHIFT;
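        /*
         * Worked example with illustrative numbers: with 4 KiB pages,
         * offset = 1000 and length = 10000 give first_page = 1,
         * last_page = 2, first_page_offset = 4096 and last_page_offset
         * = 8192, so only the one fully-covered page is released below.
         */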
4790
4791         /*
4792          * Write out all dirty pages to avoid race conditions,
4793          * then release them.
4794          */
4795         if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
4796                 err = filemap_write_and_wait_range(mapping,
4797                         offset, offset + length - 1);
4798
4799                 if (err)
4800                         return err;
4801         }
4802
4803         /* Now release the pages */
4804         if (last_page_offset > first_page_offset) {
4805                 truncate_pagecache_range(inode, first_page_offset,
4806                                          last_page_offset - 1);
4807         }
4808
4809         /* finish any pending end_io work */
4810         ext4_flush_completed_IO(inode);
4811
4812         credits = ext4_writepage_trans_blocks(inode);
4813         handle = ext4_journal_start(inode, credits);
4814         if (IS_ERR(handle))
4815                 return PTR_ERR(handle);
4816
4817         err = ext4_orphan_add(handle, inode);
4818         if (err)
4819                 goto out;
4820
4821         /*
4822          * Now we need to zero out the non-page-aligned data in the
4823          * pages at the start and tail of the hole, and unmap the buffer
4824          * heads for the block aligned regions of the page that were
4825          * completely zeroed.
4826          */
4827         if (first_page > last_page) {
4828                 /*
4829                  * If the file space being truncated is contained within a
4830                  * page, just zero out and unmap the middle of that page.
4831                  */
4832                 err = ext4_discard_partial_page_buffers(handle,
4833                         mapping, offset, length, 0);
4834
4835                 if (err)
4836                         goto out;
4837         } else {
4838                 /*
4839                  * zero out and unmap the partial page that contains
4840                  * the start of the hole
4841                  */
4842                 page_len  = first_page_offset - offset;
4843                 if (page_len > 0) {
4844                         err = ext4_discard_partial_page_buffers(handle, mapping,
4845                                                    offset, page_len, 0);
4846                         if (err)
4847                                 goto out;
4848                 }
4849
4850                 /*
4851                  * zero out and unmap the partial page that contains
4852                  * the end of the hole
4853                  */
4854                 page_len = offset + length - last_page_offset;
4855                 if (page_len > 0) {
4856                         err = ext4_discard_partial_page_buffers(handle, mapping,
4857                                         last_page_offset, page_len, 0);
4858                         if (err)
4859                                 goto out;
4860                 }
4861         }
4862
4863         /*
4864          * If i_size is contained in the last page, we need to
4865          * unmap and zero the partial page after i_size
4866          */
4867         if (inode->i_size >> PAGE_CACHE_SHIFT == last_page &&
4868            inode->i_size % PAGE_CACHE_SIZE != 0) {
4869
4870                 page_len = PAGE_CACHE_SIZE -
4871                         (inode->i_size & (PAGE_CACHE_SIZE - 1));
4872
4873                 if (page_len > 0) {
4874                         err = ext4_discard_partial_page_buffers(handle,
4875                           mapping, inode->i_size, page_len, 0);
4876
4877                         if (err)
4878                                 goto out;
4879                 }
4880         }
4881
4882         first_block = (offset + sb->s_blocksize - 1) >>
4883                 EXT4_BLOCK_SIZE_BITS(sb);
4884         stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
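        /*
         * Continuing the illustrative numbers above (4 KiB blocks,
         * offset = 1000, length = 10000): first_block = 5095 >> 12 = 1
         * and stop_block = 11000 >> 12 = 2, so only block 1, the one
         * block fully inside the hole, is removed.
         */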
4885
4886         /* If there are no blocks to remove, return now */
4887         if (first_block >= stop_block)
4888                 goto out;
4889
4890         down_write(&EXT4_I(inode)->i_data_sem);
4891         ext4_ext_invalidate_cache(inode);
4892         ext4_discard_preallocations(inode);
4893
4894         err = ext4_ext_remove_space(inode, first_block, stop_block - 1);
4895
4896         ext4_ext_invalidate_cache(inode);
4897         ext4_discard_preallocations(inode);
4898
4899         if (IS_SYNC(inode))
4900                 ext4_handle_sync(handle);
4901
4902         up_write(&EXT4_I(inode)->i_data_sem);
4903
4904 out:
4905         ext4_orphan_del(handle, inode);
4906         inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4907         ext4_mark_inode_dirty(handle, inode);
4908         ext4_journal_stop(handle);
4909         return err;
4910 }
4911 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4912                 __u64 start, __u64 len)
4913 {
4914         ext4_lblk_t start_blk;
4915         int error = 0;
4916
4917         /* fallback to generic here if not in extents fmt */
4918         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4919                 return generic_block_fiemap(inode, fieinfo, start, len,
4920                         ext4_get_block);
4921
4922         if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
4923                 return -EBADR;
4924
4925         if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
4926                 error = ext4_xattr_fiemap(inode, fieinfo);
4927         } else {
4928                 ext4_lblk_t len_blks;
4929                 __u64 last_blk;
4930
4931                 start_blk = start >> inode->i_sb->s_blocksize_bits;
4932                 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
4933                 if (last_blk >= EXT_MAX_BLOCKS)
4934                         last_blk = EXT_MAX_BLOCKS-1;
4935                 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
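                /*
                 * Worked example with illustrative numbers: with a 4 KiB
                 * block size, start = 0 and len = 1 MiB give start_blk = 0,
                 * last_blk = 255 and len_blks = 256.
                 */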
4936
4937                 /*
4938                  * Walk the extent tree gathering extent information.
4939                  * ext4_ext_fiemap_cb will push extents back to user.
4940                  */
4941                 error = ext4_ext_walk_space(inode, start_blk, len_blks,
4942                                           ext4_ext_fiemap_cb, fieinfo);
4943         }
4944
4945         return error;
4946 }