fs/ext4/extents.c
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"

#include <trace/events/ext4.h>

/*
 * Flags used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT    0x1  /* safe to zeroout if split fails \
                                        due to ENOSPC */
#define EXT4_EXT_MARK_UNWRIT1   0x2  /* mark first half unwritten */
#define EXT4_EXT_MARK_UNWRIT2   0x4  /* mark second half unwritten */

#define EXT4_EXT_DATA_VALID1    0x8  /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2    0x10 /* second half contains valid data */

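/*
 * These split flags combine as a bitmask; for instance,
 * ext4_force_split_extent_at() below passes
 * EXT4_EXT_MARK_UNWRIT1 | EXT4_EXT_MARK_UNWRIT2 when the extent being
 * split is unwritten, so that both halves keep the unwritten state.
 */
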
static __le32 ext4_extent_block_csum(struct inode *inode,
                                     struct ext4_extent_header *eh)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        __u32 csum;

        csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
                           EXT4_EXTENT_TAIL_OFFSET(eh));
        return cpu_to_le32(csum);
}

static int ext4_extent_block_csum_verify(struct inode *inode,
                                         struct ext4_extent_header *eh)
{
        struct ext4_extent_tail *et;

        if (!ext4_has_metadata_csum(inode->i_sb))
                return 1;

        et = find_ext4_extent_tail(eh);
        if (et->et_checksum != ext4_extent_block_csum(inode, eh))
                return 0;
        return 1;
}

static void ext4_extent_block_csum_set(struct inode *inode,
                                       struct ext4_extent_header *eh)
{
        struct ext4_extent_tail *et;

        if (!ext4_has_metadata_csum(inode->i_sb))
                return;

        et = find_ext4_extent_tail(eh);
        et->et_checksum = ext4_extent_block_csum(inode, eh);
}

static int ext4_split_extent(handle_t *handle,
                                struct inode *inode,
                                struct ext4_ext_path **ppath,
                                struct ext4_map_blocks *map,
                                int split_flag,
                                int flags);

static int ext4_split_extent_at(handle_t *handle,
                             struct inode *inode,
                             struct ext4_ext_path **ppath,
                             ext4_lblk_t split,
                             int split_flag,
                             int flags);

static int ext4_find_delayed_extent(struct inode *inode,
                                    struct extent_status *newes);

static int ext4_ext_truncate_extend_restart(handle_t *handle,
                                            struct inode *inode,
                                            int needed)
{
        int err;

        if (!ext4_handle_valid(handle))
                return 0;
        if (handle->h_buffer_credits > needed)
                return 0;
        err = ext4_journal_extend(handle, needed);
        if (err <= 0)
                return err;
        err = ext4_truncate_restart_trans(handle, inode, needed);
        if (err == 0)
                err = -EAGAIN;

        return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path)
{
        if (path->p_bh) {
                /* path points to block */
                BUFFER_TRACE(path->p_bh, "get_write_access");
                return ext4_journal_get_write_access(handle, path->p_bh);
        }
        /* path points to leaf/index in inode body */
        /* we use in-core data, no need to protect them */
        return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle,
                     struct inode *inode, struct ext4_ext_path *path)
{
        int err;

        WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
        if (path->p_bh) {
                ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
                /* path points to block */
                err = __ext4_handle_dirty_metadata(where, line, handle,
                                                   inode, path->p_bh);
        } else {
                /* path points to leaf/index in inode body */
                err = ext4_mark_inode_dirty(handle, inode);
        }
        return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
                              struct ext4_ext_path *path,
                              ext4_lblk_t block)
{
        if (path) {
                int depth = path->p_depth;
                struct ext4_extent *ex;

                /*
                 * Try to predict block placement assuming that we are
                 * filling in a file which will eventually be
                 * non-sparse --- i.e., in the case of libbfd writing
                 * ELF object sections out-of-order, but in a way that
                 * eventually results in a contiguous object or
                 * executable file, or some database extending a table
                 * space file.  However, this is actually somewhat
                 * non-ideal if we are writing a sparse file such as
                 * qemu or KVM writing a raw image file that is going
                 * to stay fairly sparse, since it will end up
                 * fragmenting the file system's free space.  Maybe we
                 * should have some heuristics or some way to allow
                 * userspace to pass a hint to the file system,
                 * especially if the latter case turns out to be
                 * common.
                 */
                ex = path[depth].p_ext;
                if (ex) {
                        ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
                        ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

                        if (block > ext_block)
                                return ext_pblk + (block - ext_block);
                        else
                                return ext_pblk - (ext_block - block);
                }

                /* it looks like the index is empty;
                 * try to find the starting block from the index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        return ext4_inode_to_goal_block(inode);
}

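/*
 * Worked example for the goal computation above (illustrative numbers):
 * if the extent nearest the target covers logical blocks 100..107 and
 * starts at physical block 5000, a request for logical block 110 yields
 * the goal 5000 + (110 - 100) = 5010, and a request for logical block
 * 96 yields 5000 - (100 - 96) = 4996, keeping the new allocation
 * physically adjacent to the data already on disk.
 */
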
/*
 * Allocation for a metadata block.
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
                        struct ext4_ext_path *path,
                        struct ext4_extent *ex, int *err, unsigned int flags)
{
        ext4_fsblk_t goal, newblock;

        goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
        newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
                                        NULL, err);
        return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
        int size;

        size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                        / sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 6)
                size = 6;
#endif
        return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
        int size;

        size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                        / sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 5)
                size = 5;
#endif
        return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
        int size;

        size = sizeof(EXT4_I(inode)->i_data);
        size -= sizeof(struct ext4_extent_header);
        size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 3)
                size = 3;
#endif
        return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
        int size;

        size = sizeof(EXT4_I(inode)->i_data);
        size -= sizeof(struct ext4_extent_header);
        size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 4)
                size = 4;
#endif
        return size;
}

static inline int
ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
                           struct ext4_ext_path **ppath, ext4_lblk_t lblk,
                           int nofail)
{
        struct ext4_ext_path *path = *ppath;
        int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);

        return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
                        EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
                        EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO |
                        (nofail ? EXT4_GET_BLOCKS_METADATA_NOFAIL : 0));
}

/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks.
 * Worst case is one block per extent.
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        int idxs;

        idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                / sizeof(struct ext4_extent_idx));

        /*
         * If the new delayed allocation block is contiguous with the
         * previous da block, it can share index blocks with the
         * previous block, so we only need to allocate a new index
         * block every idxs leaf blocks.  At idxs**2 blocks, we need
         * an additional index block, and at idxs**3 blocks, yet
         * another index block.
         */
        if (ei->i_da_metadata_calc_len &&
            ei->i_da_metadata_calc_last_lblock+1 == lblock) {
                int num = 0;

                if ((ei->i_da_metadata_calc_len % idxs) == 0)
                        num++;
                if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
                        num++;
                if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
                        num++;
                        ei->i_da_metadata_calc_len = 0;
                } else
                        ei->i_da_metadata_calc_len++;
                ei->i_da_metadata_calc_last_lblock++;
                return num;
        }

        /*
         * In the worst case we need a new set of index blocks at
         * every level of the inode's extent tree.
         */
        ei->i_da_metadata_calc_len = 1;
        ei->i_da_metadata_calc_last_lblock = lblock;
        return ext_depth(inode) + 1;
}

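/*
 * Worked example for the index math above, assuming 4KiB blocks (both
 * struct ext4_extent_header and struct ext4_extent_idx are 12 bytes on
 * disk): idxs = (4096 - 12) / 12 = 340, so a run of contiguous delayed
 * allocations is charged one new index block every 340 leaf blocks, a
 * second-level index block every 340**2 = 115600 blocks, and a
 * third-level one every 340**3 blocks.
 */
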
static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
        int max;

        if (depth == ext_depth(inode)) {
                if (depth == 0)
                        max = ext4_ext_space_root(inode, 1);
                else
                        max = ext4_ext_space_root_idx(inode, 1);
        } else {
                if (depth == 0)
                        max = ext4_ext_space_block(inode, 1);
                else
                        max = ext4_ext_space_block_idx(inode, 1);
        }

        return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
        ext4_fsblk_t block = ext4_ext_pblock(ext);
        int len = ext4_ext_get_actual_len(ext);
        ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
        ext4_lblk_t last = lblock + len - 1;

        if (lblock > last)
                return 0;
        return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
                                struct ext4_extent_idx *ext_idx)
{
        ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

        return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
                                struct ext4_extent_header *eh,
                                int depth)
{
        unsigned short entries;
        if (eh->eh_entries == 0)
                return 1;

        entries = le16_to_cpu(eh->eh_entries);

        if (depth == 0) {
                /* leaf entries */
                struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
                struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
                ext4_fsblk_t pblock = 0;
                ext4_lblk_t lblock = 0;
                ext4_lblk_t prev = 0;
                int len = 0;
                while (entries) {
                        if (!ext4_valid_extent(inode, ext))
                                return 0;

                        /* Check for overlapping extents */
                        lblock = le32_to_cpu(ext->ee_block);
                        len = ext4_ext_get_actual_len(ext);
                        if ((lblock <= prev) && prev) {
                                pblock = ext4_ext_pblock(ext);
                                es->s_last_error_block = cpu_to_le64(pblock);
                                return 0;
                        }
                        ext++;
                        entries--;
                        prev = lblock + len - 1;
                }
        } else {
                struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
                while (entries) {
                        if (!ext4_valid_extent_idx(inode, ext_idx))
                                return 0;
                        ext_idx++;
                        entries--;
                }
        }
        return 1;
}

static int __ext4_ext_check(const char *function, unsigned int line,
                            struct inode *inode, struct ext4_extent_header *eh,
                            int depth, ext4_fsblk_t pblk)
{
        const char *error_msg;
        int max = 0;

        if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
                error_msg = "invalid magic";
                goto corrupted;
        }
        if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
                error_msg = "unexpected eh_depth";
                goto corrupted;
        }
        if (unlikely(eh->eh_max == 0)) {
                error_msg = "invalid eh_max";
                goto corrupted;
        }
        max = ext4_ext_max_entries(inode, depth);
        if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
                error_msg = "too large eh_max";
                goto corrupted;
        }
        if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
                error_msg = "invalid eh_entries";
                goto corrupted;
        }
        if (!ext4_valid_extent_entries(inode, eh, depth)) {
                error_msg = "invalid extent entries";
                goto corrupted;
        }
        /* Verify checksum on non-root extent tree nodes */
        if (ext_depth(inode) != depth &&
            !ext4_extent_block_csum_verify(inode, eh)) {
                error_msg = "extent tree corrupted";
                goto corrupted;
        }
        return 0;

corrupted:
        ext4_error_inode(inode, function, line, 0,
                         "pblk %llu bad header/extent: %s - magic %x, "
                         "entries %u, max %u(%u), depth %u(%u)",
                         (unsigned long long) pblk, error_msg,
                         le16_to_cpu(eh->eh_magic),
                         le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
                         max, le16_to_cpu(eh->eh_depth), depth);
        return -EIO;
}

#define ext4_ext_check(inode, eh, depth, pblk)                  \
        __ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk))

int ext4_ext_check_inode(struct inode *inode)
{
        return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
}

static struct buffer_head *
__read_extent_tree_block(const char *function, unsigned int line,
                         struct inode *inode, ext4_fsblk_t pblk, int depth,
                         int flags)
{
        struct buffer_head              *bh;
        int                             err;

        bh = sb_getblk(inode->i_sb, pblk);
        if (unlikely(!bh))
                return ERR_PTR(-ENOMEM);

        if (!bh_uptodate_or_lock(bh)) {
                trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
                err = bh_submit_read(bh);
                if (err < 0)
                        goto errout;
        }
        if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
                return bh;
        err = __ext4_ext_check(function, line, inode,
                               ext_block_hdr(bh), depth, pblk);
        if (err)
                goto errout;
        set_buffer_verified(bh);
        /*
         * If this is a leaf block, cache all of its entries
         */
        if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
                struct ext4_extent_header *eh = ext_block_hdr(bh);
                struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
                ext4_lblk_t prev = 0;
                int i;

                for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
                        unsigned int status = EXTENT_STATUS_WRITTEN;
                        ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
                        int len = ext4_ext_get_actual_len(ex);

                        if (prev && (prev != lblk))
                                ext4_es_cache_extent(inode, prev,
                                                     lblk - prev, ~0,
                                                     EXTENT_STATUS_HOLE);

                        if (ext4_ext_is_unwritten(ex))
                                status = EXTENT_STATUS_UNWRITTEN;
                        ext4_es_cache_extent(inode, lblk, len,
                                             ext4_ext_pblock(ex), status);
                        prev = lblk + len;
                }
        }
        return bh;
errout:
        put_bh(bh);
        return ERR_PTR(err);
}

#define read_extent_tree_block(inode, pblk, depth, flags)               \
        __read_extent_tree_block(__func__, __LINE__, (inode), (pblk),   \
                                 (depth), (flags))

/*
 * This function is called to cache a file's extent information in the
 * extent status tree
 */
int ext4_ext_precache(struct inode *inode)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_ext_path *path = NULL;
        struct buffer_head *bh;
        int i = 0, depth, ret = 0;

        if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
                return 0;       /* not an extent-mapped inode */

        down_read(&ei->i_data_sem);
        depth = ext_depth(inode);

        path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
                       GFP_NOFS);
        if (path == NULL) {
                up_read(&ei->i_data_sem);
                return -ENOMEM;
        }

        /* Don't cache anything if there are no external extent blocks */
        if (depth == 0)
                goto out;
        path[0].p_hdr = ext_inode_hdr(inode);
        ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
        if (ret)
                goto out;
        path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
        while (i >= 0) {
                /*
                 * If this is a leaf block or we've reached the end of
                 * the index block, go up
                 */
                if ((i == depth) ||
                    path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
                        brelse(path[i].p_bh);
                        path[i].p_bh = NULL;
                        i--;
                        continue;
                }
                bh = read_extent_tree_block(inode,
                                            ext4_idx_pblock(path[i].p_idx++),
                                            depth - i - 1,
                                            EXT4_EX_FORCE_CACHE);
                if (IS_ERR(bh)) {
                        ret = PTR_ERR(bh);
                        break;
                }
                i++;
                path[i].p_bh = bh;
                path[i].p_hdr = ext_block_hdr(bh);
                path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
        }
        ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
out:
        up_read(&ei->i_data_sem);
        ext4_ext_drop_refs(path);
        kfree(path);
        return ret;
}

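/*
 * Note: the precache walk above is an iterative depth-first traversal
 * that uses the path[] array as an explicit stack: p_idx at each level
 * records the next index entry to descend into, and i moves down when a
 * block is read and back up when a level is exhausted.  Userspace
 * typically reaches this function through the EXT4_IOC_PRECACHE_EXTENTS
 * ioctl.
 */
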
#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
        int k, l = path->p_depth;

        ext_debug("path:");
        for (k = 0; k <= l; k++, path++) {
                if (path->p_idx) {
                        ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
                                  ext4_idx_pblock(path->p_idx));
                } else if (path->p_ext) {
                        ext_debug("  %d:[%d]%d:%llu ",
                                  le32_to_cpu(path->p_ext->ee_block),
                                  ext4_ext_is_unwritten(path->p_ext),
                                  ext4_ext_get_actual_len(path->p_ext),
                                  ext4_ext_pblock(path->p_ext));
                } else
                        ext_debug("  []");
        }
        ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
        int depth = ext_depth(inode);
        struct ext4_extent_header *eh;
        struct ext4_extent *ex;
        int i;

        if (!path)
                return;

        eh = path[depth].p_hdr;
        ex = EXT_FIRST_EXTENT(eh);

        ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

        for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
                ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
                          ext4_ext_is_unwritten(ex),
                          ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
        }
        ext_debug("\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
                        ext4_fsblk_t newblock, int level)
{
        int depth = ext_depth(inode);
        struct ext4_extent *ex;

        if (depth != level) {
                struct ext4_extent_idx *idx;
                idx = path[level].p_idx;
                while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
                        ext_debug("%d: move %d:%llu in new index %llu\n", level,
                                        le32_to_cpu(idx->ei_block),
                                        ext4_idx_pblock(idx),
                                        newblock);
                        idx++;
                }

                return;
        }

        ex = path[depth].p_ext;
        while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
                ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
                                le32_to_cpu(ex->ee_block),
                                ext4_ext_pblock(ex),
                                ext4_ext_is_unwritten(ex),
                                ext4_ext_get_actual_len(ex),
                                newblock);
                ex++;
        }
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif

void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
        int depth, i;

        if (!path)
                return;
        depth = path->p_depth;
        for (i = 0; i <= depth; i++, path++)
                if (path->p_bh) {
                        brelse(path->p_bh);
                        path->p_bh = NULL;
                }
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block;
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
                        struct ext4_ext_path *path, ext4_lblk_t block)
{
        struct ext4_extent_header *eh = path->p_hdr;
        struct ext4_extent_idx *r, *l, *m;

        ext_debug("binsearch for %u(idx):  ", block);

        l = EXT_FIRST_INDEX(eh) + 1;
        r = EXT_LAST_INDEX(eh);
        while (l <= r) {
                m = l + (r - l) / 2;
                if (block < le32_to_cpu(m->ei_block))
                        r = m - 1;
                else
                        l = m + 1;
                ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
                                m, le32_to_cpu(m->ei_block),
                                r, le32_to_cpu(r->ei_block));
        }

        path->p_idx = l - 1;
        ext_debug("  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
                  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
        {
                struct ext4_extent_idx *chix, *ix;
                int k;

                chix = ix = EXT_FIRST_INDEX(eh);
                for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
                        if (k != 0 &&
                            le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
                                printk(KERN_DEBUG "k=%d, ix=0x%p, "
                                       "first=0x%p\n", k,
                                       ix, EXT_FIRST_INDEX(eh));
                                printk(KERN_DEBUG "%u <= %u\n",
                                       le32_to_cpu(ix->ei_block),
                                       le32_to_cpu(ix[-1].ei_block));
                        }
                        BUG_ON(k && le32_to_cpu(ix->ei_block)
                                           <= le32_to_cpu(ix[-1].ei_block));
                        if (block < le32_to_cpu(ix->ei_block))
                                break;
                        chix = ix;
                }
                BUG_ON(chix != path->p_idx);
        }
#endif
}

/*
 * ext4_ext_binsearch:
 * binary search for the closest extent of the given block;
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
                struct ext4_ext_path *path, ext4_lblk_t block)
{
        struct ext4_extent_header *eh = path->p_hdr;
        struct ext4_extent *r, *l, *m;

        if (eh->eh_entries == 0) {
                /*
                 * this leaf is empty:
                 * we get such a leaf in split/add case
                 */
                return;
        }

        ext_debug("binsearch for %u:  ", block);

        l = EXT_FIRST_EXTENT(eh) + 1;
        r = EXT_LAST_EXTENT(eh);

        while (l <= r) {
                m = l + (r - l) / 2;
                if (block < le32_to_cpu(m->ee_block))
                        r = m - 1;
                else
                        l = m + 1;
                ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
                                m, le32_to_cpu(m->ee_block),
                                r, le32_to_cpu(r->ee_block));
        }

        path->p_ext = l - 1;
        ext_debug("  -> %d:%llu:[%d]%d ",
                        le32_to_cpu(path->p_ext->ee_block),
                        ext4_ext_pblock(path->p_ext),
                        ext4_ext_is_unwritten(path->p_ext),
                        ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
        {
                struct ext4_extent *chex, *ex;
                int k;

                chex = ex = EXT_FIRST_EXTENT(eh);
                for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
                        BUG_ON(k && le32_to_cpu(ex->ee_block)
                                          <= le32_to_cpu(ex[-1].ee_block));
                        if (block < le32_to_cpu(ex->ee_block))
                                break;
                        chex = ex;
                }
                BUG_ON(chex != path->p_ext);
        }
#endif
}

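/*
 * Example of the binary search contract (illustrative values): in a
 * leaf whose extents start at logical blocks 0, 100 and 200, a search
 * for block 150 leaves path->p_ext pointing at the extent starting at
 * 100, i.e. the last entry whose ee_block is <= the target.  Starting
 * the search at EXT_FIRST_EXTENT(eh) + 1 is what makes the final
 * "l - 1" step safe even when the target precedes every other entry.
 */
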
int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
        struct ext4_extent_header *eh;

        eh = ext_inode_hdr(inode);
        eh->eh_depth = 0;
        eh->eh_entries = 0;
        eh->eh_magic = EXT4_EXT_MAGIC;
        eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
        ext4_mark_inode_dirty(handle, inode);
        return 0;
}

struct ext4_ext_path *
ext4_find_extent(struct inode *inode, ext4_lblk_t block,
                 struct ext4_ext_path **orig_path, int flags)
{
        struct ext4_extent_header *eh;
        struct buffer_head *bh;
        struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
        short int depth, i, ppos = 0;
        int ret;

        eh = ext_inode_hdr(inode);
        depth = ext_depth(inode);

        if (path) {
                ext4_ext_drop_refs(path);
                if (depth > path[0].p_maxdepth) {
                        kfree(path);
                        *orig_path = path = NULL;
                }
        }
        if (!path) {
                /* account possible depth increase */
                path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
                                GFP_NOFS);
                if (unlikely(!path))
                        return ERR_PTR(-ENOMEM);
                path[0].p_maxdepth = depth + 1;
        }
        path[0].p_hdr = eh;
        path[0].p_bh = NULL;

        i = depth;
        /* walk through the tree */
        while (i) {
                ext_debug("depth %d: num %d, max %d\n",
                          ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

                ext4_ext_binsearch_idx(inode, path + ppos, block);
                path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
                path[ppos].p_depth = i;
                path[ppos].p_ext = NULL;

                bh = read_extent_tree_block(inode, path[ppos].p_block, --i,
                                            flags);
                if (unlikely(IS_ERR(bh))) {
                        ret = PTR_ERR(bh);
                        goto err;
                }

                eh = ext_block_hdr(bh);
                ppos++;
                if (unlikely(ppos > depth)) {
                        put_bh(bh);
                        EXT4_ERROR_INODE(inode,
                                         "ppos %d > depth %d", ppos, depth);
                        ret = -EIO;
                        goto err;
                }
                path[ppos].p_bh = bh;
                path[ppos].p_hdr = eh;
        }

        path[ppos].p_depth = i;
        path[ppos].p_ext = NULL;
        path[ppos].p_idx = NULL;

        /* find extent */
        ext4_ext_binsearch(inode, path + ppos, block);
        /* if not an empty leaf */
        if (path[ppos].p_ext)
                path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

        ext4_ext_show_path(inode, path);

        return path;

err:
        ext4_ext_drop_refs(path);
        kfree(path);
        if (orig_path)
                *orig_path = NULL;
        return ERR_PTR(ret);
}

/*
 * ext4_ext_insert_index:
 * insert a new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
                                 struct ext4_ext_path *curp,
                                 int logical, ext4_fsblk_t ptr)
{
        struct ext4_extent_idx *ix;
        int len, err;

        err = ext4_ext_get_access(handle, inode, curp);
        if (err)
                return err;

        if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
                EXT4_ERROR_INODE(inode,
                                 "logical %d == ei_block %d!",
                                 logical, le32_to_cpu(curp->p_idx->ei_block));
                return -EIO;
        }

        if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
                             >= le16_to_cpu(curp->p_hdr->eh_max))) {
                EXT4_ERROR_INODE(inode,
                                 "eh_entries %d >= eh_max %d!",
                                 le16_to_cpu(curp->p_hdr->eh_entries),
                                 le16_to_cpu(curp->p_hdr->eh_max));
                return -EIO;
        }

        if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
                /* insert after */
                ext_debug("insert new index %d after: %llu\n", logical, ptr);
                ix = curp->p_idx + 1;
        } else {
                /* insert before */
                ext_debug("insert new index %d before: %llu\n", logical, ptr);
                ix = curp->p_idx;
        }

        len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
        BUG_ON(len < 0);
        if (len > 0) {
                ext_debug("insert new index %d: "
                                "move %d indices from 0x%p to 0x%p\n",
                                logical, len, ix, ix + 1);
                memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
        }

        if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
                EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
                return -EIO;
        }

        ix->ei_block = cpu_to_le32(logical);
        ext4_idx_store_pblock(ix, ptr);
        le16_add_cpu(&curp->p_hdr->eh_entries, 1);

        if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
                EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
                return -EIO;
        }

        err = ext4_ext_dirty(handle, inode, curp);
        ext4_std_error(inode->i_sb, err);

        return err;
}

/*
 * ext4_ext_split:
 * inserts a new subtree into the path, using the free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes the decision where to split
 * - moves remaining extents and index entries (right of the split point)
 *   into the newly allocated blocks
 * - initializes the subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
                          unsigned int flags,
                          struct ext4_ext_path *path,
                          struct ext4_extent *newext, int at)
{
        struct buffer_head *bh = NULL;
        int depth = ext_depth(inode);
        struct ext4_extent_header *neh;
        struct ext4_extent_idx *fidx;
        int i = at, k, m, a;
        ext4_fsblk_t newblock, oldblock;
        __le32 border;
        ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
        int err = 0;

        /* make decision: where to split? */
        /* FIXME: now the decision is simplest: split at the current extent */

        /* if the current leaf will be split, then we should use
         * the border from the split point */
        if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
                EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
                return -EIO;
        }
        if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
                border = path[depth].p_ext[1].ee_block;
                ext_debug("leaf will be split."
                                " next leaf starts at %d\n",
                                  le32_to_cpu(border));
        } else {
                border = newext->ee_block;
                ext_debug("leaf will be added."
                                " next leaf starts at %d\n",
                                le32_to_cpu(border));
        }

        /*
         * If an error occurs, we break off processing
         * and mark the filesystem read-only.  The index won't
         * be inserted and the tree will remain in a consistent
         * state.  The next mount will repair buffers too.
         */

        /*
         * Get an array to track all allocated blocks.
         * We need this to handle errors and free blocks
         * upon them.
         */
        ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
        if (!ablocks)
                return -ENOMEM;

        /* allocate all needed blocks */
        ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
        for (a = 0; a < depth - at; a++) {
                newblock = ext4_ext_new_meta_block(handle, inode, path,
                                                   newext, &err, flags);
                if (newblock == 0)
                        goto cleanup;
                ablocks[a] = newblock;
        }

        /* initialize new leaf */
        newblock = ablocks[--a];
        if (unlikely(newblock == 0)) {
                EXT4_ERROR_INODE(inode, "newblock == 0!");
                err = -EIO;
                goto cleanup;
        }
        bh = sb_getblk(inode->i_sb, newblock);
        if (unlikely(!bh)) {
                err = -ENOMEM;
                goto cleanup;
        }
        lock_buffer(bh);

        err = ext4_journal_get_create_access(handle, bh);
        if (err)
                goto cleanup;

        neh = ext_block_hdr(bh);
        neh->eh_entries = 0;
        neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
        neh->eh_magic = EXT4_EXT_MAGIC;
        neh->eh_depth = 0;

        /* move remainder of path[depth] to the new leaf */
        if (unlikely(path[depth].p_hdr->eh_entries !=
                     path[depth].p_hdr->eh_max)) {
                EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
                                 path[depth].p_hdr->eh_entries,
                                 path[depth].p_hdr->eh_max);
                err = -EIO;
                goto cleanup;
        }
        /* start copying from the next extent */
        m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
        ext4_ext_show_move(inode, path, newblock, depth);
        if (m) {
                struct ext4_extent *ex;
                ex = EXT_FIRST_EXTENT(neh);
                memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
                le16_add_cpu(&neh->eh_entries, m);
        }

        ext4_extent_block_csum_set(inode, neh);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        err = ext4_handle_dirty_metadata(handle, inode, bh);
        if (err)
                goto cleanup;
        brelse(bh);
        bh = NULL;

        /* correct old leaf */
        if (m) {
                err = ext4_ext_get_access(handle, inode, path + depth);
                if (err)
                        goto cleanup;
                le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
                err = ext4_ext_dirty(handle, inode, path + depth);
                if (err)
                        goto cleanup;
        }

        /* create intermediate indexes */
        k = depth - at - 1;
        if (unlikely(k < 0)) {
                EXT4_ERROR_INODE(inode, "k %d < 0!", k);
                err = -EIO;
                goto cleanup;
        }
        if (k)
                ext_debug("create %d intermediate indices\n", k);
        /* insert new index into current index block */
        /* current depth stored in i var */
        i = depth - 1;
        while (k--) {
                oldblock = newblock;
                newblock = ablocks[--a];
                bh = sb_getblk(inode->i_sb, newblock);
                if (unlikely(!bh)) {
                        err = -ENOMEM;
                        goto cleanup;
                }
                lock_buffer(bh);

                err = ext4_journal_get_create_access(handle, bh);
                if (err)
                        goto cleanup;

                neh = ext_block_hdr(bh);
                neh->eh_entries = cpu_to_le16(1);
                neh->eh_magic = EXT4_EXT_MAGIC;
                neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
                neh->eh_depth = cpu_to_le16(depth - i);
                fidx = EXT_FIRST_INDEX(neh);
                fidx->ei_block = border;
                ext4_idx_store_pblock(fidx, oldblock);

                ext_debug("int.index at %d (block %llu): %u -> %llu\n",
                                i, newblock, le32_to_cpu(border), oldblock);

                /* move remainder of path[i] to the new index block */
                if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
                                        EXT_LAST_INDEX(path[i].p_hdr))) {
                        EXT4_ERROR_INODE(inode,
                                         "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
                                         le32_to_cpu(path[i].p_ext->ee_block));
                        err = -EIO;
                        goto cleanup;
                }
                /* start copying indexes */
                m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
                ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
                                EXT_MAX_INDEX(path[i].p_hdr));
                ext4_ext_show_move(inode, path, newblock, i);
                if (m) {
                        memmove(++fidx, path[i].p_idx,
                                sizeof(struct ext4_extent_idx) * m);
                        le16_add_cpu(&neh->eh_entries, m);
                }
                ext4_extent_block_csum_set(inode, neh);
                set_buffer_uptodate(bh);
                unlock_buffer(bh);

                err = ext4_handle_dirty_metadata(handle, inode, bh);
                if (err)
                        goto cleanup;
                brelse(bh);
                bh = NULL;

                /* correct old index */
                if (m) {
                        err = ext4_ext_get_access(handle, inode, path + i);
                        if (err)
                                goto cleanup;
                        le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
                        err = ext4_ext_dirty(handle, inode, path + i);
                        if (err)
                                goto cleanup;
                }

                i--;
        }

        /* insert new index */
        err = ext4_ext_insert_index(handle, inode, path + at,
                                    le32_to_cpu(border), newblock);

cleanup:
        if (bh) {
                if (buffer_locked(bh))
                        unlock_buffer(bh);
                brelse(bh);
        }

        if (err) {
                /* free all allocated blocks in error case */
                for (i = 0; i < depth; i++) {
                        if (!ablocks[i])
                                continue;
                        ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
                                         EXT4_FREE_BLOCKS_METADATA);
                }
        }
        kfree(ablocks);

        return err;
}

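/*
 * Sketch of what ext4_ext_split() does for a tree of depth 1 when the
 * leaf referenced by path[1] is full (illustrative layout only):
 *
 *   before:  root index -> [ full leaf: e1 e2 e3 e4 ]
 *   after:   root index -> [ e1 e2 ]    new leaf -> [ e3 e4 ]
 *
 * with a new index entry for the split border inserted into the first
 * ancestor (at depth @at) that still has a free index slot.
 */
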
/*
 * ext4_ext_grow_indepth:
 * implements the tree growing procedure:
 * - allocates a new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes the new top level, creating an index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
                                 unsigned int flags)
{
        struct ext4_extent_header *neh;
        struct buffer_head *bh;
        ext4_fsblk_t newblock, goal = 0;
        struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
        int err = 0;

        /* Try to prepend new index to old one */
        if (ext_depth(inode))
                goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode)));
        if (goal > le32_to_cpu(es->s_first_data_block)) {
                flags |= EXT4_MB_HINT_TRY_GOAL;
                goal--;
        } else
                goal = ext4_inode_to_goal_block(inode);
        newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
                                        NULL, &err);
        if (newblock == 0)
                return err;

        bh = sb_getblk(inode->i_sb, newblock);
        if (unlikely(!bh))
                return -ENOMEM;
        lock_buffer(bh);

        err = ext4_journal_get_create_access(handle, bh);
        if (err) {
                unlock_buffer(bh);
                goto out;
        }

        /* move top-level index/leaf into new block */
        memmove(bh->b_data, EXT4_I(inode)->i_data,
                sizeof(EXT4_I(inode)->i_data));

        /* set size of new block */
        neh = ext_block_hdr(bh);
        /* old root could have indexes or leaves
         * so calculate eh_max the right way */
        if (ext_depth(inode))
                neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
        else
                neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
        neh->eh_magic = EXT4_EXT_MAGIC;
        ext4_extent_block_csum_set(inode, neh);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        err = ext4_handle_dirty_metadata(handle, inode, bh);
        if (err)
                goto out;

        /* Update top-level index: num,max,pointer */
        neh = ext_inode_hdr(inode);
        neh->eh_entries = cpu_to_le16(1);
        ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
        if (neh->eh_depth == 0) {
                /* Root extent block becomes index block */
                neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
                EXT_FIRST_INDEX(neh)->ei_block =
                        EXT_FIRST_EXTENT(neh)->ee_block;
        }
        ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
                  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
                  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
                  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

        le16_add_cpu(&neh->eh_depth, 1);
        ext4_mark_inode_dirty(handle, inode);
out:
        brelse(bh);

        return err;
}

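/*
 * Before/after picture of growing in depth (illustrative, depth 0 -> 1):
 *
 *   before:  inode i_data: [ eh depth=0 | e1 e2 e3 e4 ]
 *   after:   inode i_data: [ eh depth=1 | idx -> new block ]
 *            new block:    [ eh depth=0 | e1 e2 e3 e4 ]
 *
 * The old root's entries move wholesale into the newly allocated block,
 * leaving the in-inode root with a single index entry and free index
 * slots, which is what gives the caller room to split into.
 */
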
1340 /*
1341  * ext4_ext_create_new_leaf:
1342  * finds empty index and adds new leaf.
1343  * if no free index is found, then it requests in-depth growing.
1344  */
1345 static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
1346                                     unsigned int mb_flags,
1347                                     unsigned int gb_flags,
1348                                     struct ext4_ext_path **ppath,
1349                                     struct ext4_extent *newext)
1350 {
1351         struct ext4_ext_path *path = *ppath;
1352         struct ext4_ext_path *curp;
1353         int depth, i, err = 0;
1354
1355 repeat:
1356         i = depth = ext_depth(inode);
1357
1358         /* walk up to the tree and look for free index entry */
1359         curp = path + depth;
1360         while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
1361                 i--;
1362                 curp--;
1363         }
1364
1365         /* we use already allocated block for index block,
1366          * so subsequent data blocks should be contiguous */
1367         if (EXT_HAS_FREE_INDEX(curp)) {
1368                 /* if we found index with free entry, then use that
1369                  * entry: create all needed subtree and add new leaf */
1370                 err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
1371                 if (err)
1372                         goto out;
1373
1374                 /* refill path */
1375                 path = ext4_find_extent(inode,
1376                                     (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1377                                     ppath, gb_flags);
1378                 if (IS_ERR(path))
1379                         err = PTR_ERR(path);
1380         } else {
1381                 /* tree is full, time to grow in depth */
1382                 err = ext4_ext_grow_indepth(handle, inode, mb_flags);
1383                 if (err)
1384                         goto out;
1385
1386                 /* refill path */
1387                 path = ext4_find_extent(inode,
1388                                    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1389                                     ppath, gb_flags);
1390                 if (IS_ERR(path)) {
1391                         err = PTR_ERR(path);
1392                         goto out;
1393                 }
1394
1395                 /*
1396                  * only first (depth 0 -> 1) produces free space;
1397                  * in all other cases we have to split the grown tree
1398                  */
1399                 depth = ext_depth(inode);
1400                 if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
1401                         /* now we need to split */
1402                         goto repeat;
1403                 }
1404         }
1405
1406 out:
1407         return err;
1408 }
1409
1410 /*
1411  * search the closest allocated block to the left for *logical
1412  * and returns it at @logical + it's physical address at @phys
1413  * if *logical is the smallest allocated block, the function
1414  * returns 0 at @phys
1415  * return value contains 0 (success) or error code
1416  */
1417 static int ext4_ext_search_left(struct inode *inode,
1418                                 struct ext4_ext_path *path,
1419                                 ext4_lblk_t *logical, ext4_fsblk_t *phys)
1420 {
1421         struct ext4_extent_idx *ix;
1422         struct ext4_extent *ex;
1423         int depth, ee_len;
1424
1425         if (unlikely(path == NULL)) {
1426                 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1427                 return -EIO;
1428         }
1429         depth = path->p_depth;
1430         *phys = 0;
1431
1432         if (depth == 0 && path->p_ext == NULL)
1433                 return 0;
1434
1435         /* usually extent in the path covers blocks smaller
1436          * then *logical, but it can be that extent is the
1437          * first one in the file */
1438
1439         ex = path[depth].p_ext;
1440         ee_len = ext4_ext_get_actual_len(ex);
1441         if (*logical < le32_to_cpu(ex->ee_block)) {
1442                 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1443                         EXT4_ERROR_INODE(inode,
1444                                          "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
1445                                          *logical, le32_to_cpu(ex->ee_block));
1446                         return -EIO;
1447                 }
1448                 while (--depth >= 0) {
1449                         ix = path[depth].p_idx;
1450                         if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1451                                 EXT4_ERROR_INODE(inode,
1452                                   "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
1453                                   ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
1454                                   EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
1455                 le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
1456                                   depth);
1457                                 return -EIO;
1458                         }
1459                 }
1460                 return 0;
1461         }
1462
1463         if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1464                 EXT4_ERROR_INODE(inode,
1465                                  "logical %d < ee_block %d + ee_len %d!",
1466                                  *logical, le32_to_cpu(ex->ee_block), ee_len);
1467                 return -EIO;
1468         }
1469
1470         *logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
1471         *phys = ext4_ext_pblock(ex) + ee_len - 1;
1472         return 0;
1473 }
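
/*
 * Illustrative sketch (not part of the build): a caller that wants the
 * nearest allocated neighbour to the left of a mapping request might use
 * ext4_ext_search_left() roughly like this, assuming @path was obtained
 * from ext4_find_extent() for the same logical block (map_lblk is a
 * placeholder name):
 *
 *	ext4_lblk_t left_lblk = map_lblk;
 *	ext4_fsblk_t left_pblk;
 *	int err;
 *
 *	err = ext4_ext_search_left(inode, path, &left_lblk, &left_pblk);
 *	if (err)
 *		return err;
 *	if (left_pblk == 0)
 *		;	// nothing allocated to the left of map_lblk
 *	else
 *		;	// left_lblk/left_pblk name the closest block
 */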
1474
1475 /*
1476  * Search the closest allocated block to the right of *logical and
1477  * return it at @logical together with its physical address at @phys.
1478  * If *logical is the largest allocated block, the function
1479  * returns 0 at @phys.
1480  * The return value is 0 (success) or an error code.
1481  */
1482 static int ext4_ext_search_right(struct inode *inode,
1483                                  struct ext4_ext_path *path,
1484                                  ext4_lblk_t *logical, ext4_fsblk_t *phys,
1485                                  struct ext4_extent **ret_ex)
1486 {
1487         struct buffer_head *bh = NULL;
1488         struct ext4_extent_header *eh;
1489         struct ext4_extent_idx *ix;
1490         struct ext4_extent *ex;
1491         ext4_fsblk_t block;
1492         int depth;      /* Note, NOT eh_depth; depth from top of tree */
1493         int ee_len;
1494
1495         if (unlikely(path == NULL)) {
1496                 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1497                 return -EIO;
1498         }
1499         depth = path->p_depth;
1500         *phys = 0;
1501
1502         if (depth == 0 && path->p_ext == NULL)
1503                 return 0;
1504
1505         /* usually the extent in the path covers blocks smaller
1506          * than *logical, but it may be that the extent is the
1507          * first one in the file */
1508
1509         ex = path[depth].p_ext;
1510         ee_len = ext4_ext_get_actual_len(ex);
1511         if (*logical < le32_to_cpu(ex->ee_block)) {
1512                 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1513                         EXT4_ERROR_INODE(inode,
1514                                          "first_extent(path[%d].p_hdr) != ex",
1515                                          depth);
1516                         return -EIO;
1517                 }
1518                 while (--depth >= 0) {
1519                         ix = path[depth].p_idx;
1520                         if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1521                                 EXT4_ERROR_INODE(inode,
1522                                                  "ix != EXT_FIRST_INDEX *logical %d!",
1523                                                  *logical);
1524                                 return -EIO;
1525                         }
1526                 }
1527                 goto found_extent;
1528         }
1529
1530         if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1531                 EXT4_ERROR_INODE(inode,
1532                                  "logical %d < ee_block %d + ee_len %d!",
1533                                  *logical, le32_to_cpu(ex->ee_block), ee_len);
1534                 return -EIO;
1535         }
1536
1537         if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
1538                 /* next allocated block in this leaf */
1539                 ex++;
1540                 goto found_extent;
1541         }
1542
1543         /* go up and search for index to the right */
1544         while (--depth >= 0) {
1545                 ix = path[depth].p_idx;
1546                 if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
1547                         goto got_index;
1548         }
1549
1550         /* we've gone up to the root and found no index to the right */
1551         return 0;
1552
1553 got_index:
1554         /* we've found an index to the right, let's
1555          * follow it and find the closest allocated
1556          * block to the right */
1557         ix++;
1558         block = ext4_idx_pblock(ix);
1559         while (++depth < path->p_depth) {
1560                 /* subtract from p_depth to get proper eh_depth */
1561                 bh = read_extent_tree_block(inode, block,
1562                                             path->p_depth - depth, 0);
1563                 if (IS_ERR(bh))
1564                         return PTR_ERR(bh);
1565                 eh = ext_block_hdr(bh);
1566                 ix = EXT_FIRST_INDEX(eh);
1567                 block = ext4_idx_pblock(ix);
1568                 put_bh(bh);
1569         }
1570
1571         bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0);
1572         if (IS_ERR(bh))
1573                 return PTR_ERR(bh);
1574         eh = ext_block_hdr(bh);
1575         ex = EXT_FIRST_EXTENT(eh);
1576 found_extent:
1577         *logical = le32_to_cpu(ex->ee_block);
1578         *phys = ext4_ext_pblock(ex);
1579         *ret_ex = ex;
1580         if (bh)
1581                 put_bh(bh);
1582         return 0;
1583 }
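
/*
 * Illustrative sketch (not part of the build): unlike the left search,
 * the right search may have to read extent tree blocks to descend into
 * the next subtree, and it also hands back the extent it found
 * (map_lblk is a placeholder name):
 *
 *	ext4_lblk_t right_lblk = map_lblk;
 *	ext4_fsblk_t right_pblk;
 *	struct ext4_extent *right_ex;
 *	int err;
 *
 *	err = ext4_ext_search_right(inode, path, &right_lblk,
 *				    &right_pblk, &right_ex);
 *	// on success with right_pblk != 0, right_ex points at the
 *	// closest extent to the right of the original *logical
 */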
1584
1585 /*
1586  * ext4_ext_next_allocated_block:
1587  * returns the allocated block in the subsequent extent, or EXT_MAX_BLOCKS.
1588  * NOTE: it treats the block number from an index entry as an
1589  * allocated block. Thus, index entries have to be consistent
1590  * with leaves.
1591  */
1592 ext4_lblk_t
1593 ext4_ext_next_allocated_block(struct ext4_ext_path *path)
1594 {
1595         int depth;
1596
1597         BUG_ON(path == NULL);
1598         depth = path->p_depth;
1599
1600         if (depth == 0 && path->p_ext == NULL)
1601                 return EXT_MAX_BLOCKS;
1602
1603         while (depth >= 0) {
1604                 if (depth == path->p_depth) {
1605                         /* leaf */
1606                         if (path[depth].p_ext &&
1607                                 path[depth].p_ext !=
1608                                         EXT_LAST_EXTENT(path[depth].p_hdr))
1609                           return le32_to_cpu(path[depth].p_ext[1].ee_block);
1610                 } else {
1611                         /* index */
1612                         if (path[depth].p_idx !=
1613                                         EXT_LAST_INDEX(path[depth].p_hdr))
1614                           return le32_to_cpu(path[depth].p_idx[1].ei_block);
1615                 }
1616                 depth--;
1617         }
1618
1619         return EXT_MAX_BLOCKS;
1620 }
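
/*
 * Worked example (illustrative): in a depth-1 tree whose leaf holds the
 * extents [0..9] and [20..29], a path positioned on the first extent
 * makes ext4_ext_next_allocated_block() return 20, taken from the next
 * extent in the same leaf; positioned on the last extent, the walk
 * climbs to the index level, and if no index entry lies to the right
 * it returns EXT_MAX_BLOCKS.
 */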
1621
1622 /*
1623  * ext4_ext_next_leaf_block:
1624  * returns first allocated block from next leaf or EXT_MAX_BLOCKS
1625  */
1626 static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
1627 {
1628         int depth;
1629
1630         BUG_ON(path == NULL);
1631         depth = path->p_depth;
1632
1633         /* a zero-depth tree has no leaf blocks at all */
1634         if (depth == 0)
1635                 return EXT_MAX_BLOCKS;
1636
1637         /* go to index block */
1638         depth--;
1639
1640         while (depth >= 0) {
1641                 if (path[depth].p_idx !=
1642                                 EXT_LAST_INDEX(path[depth].p_hdr))
1643                         return (ext4_lblk_t)
1644                                 le32_to_cpu(path[depth].p_idx[1].ei_block);
1645                 depth--;
1646         }
1647
1648         return EXT_MAX_BLOCKS;
1649 }
1650
1651 /*
1652  * ext4_ext_correct_indexes:
1653  * if a leaf gets modified and the modified extent is first in the leaf,
1654  * then we have to correct all indexes above.
1655  * TODO: do we need to correct the tree in all cases?
1656  */
1657 static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
1658                                 struct ext4_ext_path *path)
1659 {
1660         struct ext4_extent_header *eh;
1661         int depth = ext_depth(inode);
1662         struct ext4_extent *ex;
1663         __le32 border;
1664         int k, err = 0;
1665
1666         eh = path[depth].p_hdr;
1667         ex = path[depth].p_ext;
1668
1669         if (unlikely(ex == NULL || eh == NULL)) {
1670                 EXT4_ERROR_INODE(inode,
1671                                  "ex %p == NULL or eh %p == NULL", ex, eh);
1672                 return -EIO;
1673         }
1674
1675         if (depth == 0) {
1676                 /* there is no tree at all */
1677                 return 0;
1678         }
1679
1680         if (ex != EXT_FIRST_EXTENT(eh)) {
1681                 /* we correct the tree only if the first extent got modified */
1682                 return 0;
1683         }
1684
1685         /*
1686          * TODO: we need correction if border is smaller than current one
1687          */
1688         k = depth - 1;
1689         border = path[depth].p_ext->ee_block;
1690         err = ext4_ext_get_access(handle, inode, path + k);
1691         if (err)
1692                 return err;
1693         path[k].p_idx->ei_block = border;
1694         err = ext4_ext_dirty(handle, inode, path + k);
1695         if (err)
1696                 return err;
1697
1698         while (k--) {
1699                 /* change all left-side indexes */
1700                 if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
1701                         break;
1702                 err = ext4_ext_get_access(handle, inode, path + k);
1703                 if (err)
1704                         break;
1705                 path[k].p_idx->ei_block = border;
1706                 err = ext4_ext_dirty(handle, inode, path + k);
1707                 if (err)
1708                         break;
1709         }
1710
1711         return err;
1712 }
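
/*
 * Illustrative example: suppose the first extent of a leaf used to
 * start at logical block 100 and is modified to start at 96. Every
 * index entry on the path that addressed this leaf with ei_block == 100
 * is rewritten to 96, walking from the level just above the leaf
 * towards the root and stopping at the first level where the path does
 * not pass through the leftmost index entry.
 */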
1713
1714 int
1715 ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
1716                                 struct ext4_extent *ex2)
1717 {
1718         unsigned short ext1_ee_len, ext2_ee_len;
1719
1720         /*
1721          * Make sure that both extents are initialized. We don't merge
1722          * unwritten extents so that we can be sure that end_io code has
1723          * the extent that was written properly split out and conversion to
1724          * initialized is trivial.
1725          */
1726         if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2))
1727                 return 0;
1728
1729         ext1_ee_len = ext4_ext_get_actual_len(ex1);
1730         ext2_ee_len = ext4_ext_get_actual_len(ex2);
1731
1732         if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
1733                         le32_to_cpu(ex2->ee_block))
1734                 return 0;
1735
1736         /*
1737          * To allow future support for preallocated extents to be added
1738          * as an RO_COMPAT feature, refuse to merge two extents if
1739          * this can result in the top bit of ee_len being set.
1740          */
1741         if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
1742                 return 0;
1743         if (ext4_ext_is_unwritten(ex1) &&
1744             (ext4_test_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN) ||
1745              atomic_read(&EXT4_I(inode)->i_unwritten) ||
1746              (ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)))
1747                 return 0;
1748 #ifdef AGGRESSIVE_TEST
1749         if (ext1_ee_len >= 4)
1750                 return 0;
1751 #endif
1752
1753         if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
1754                 return 1;
1755         return 0;
1756 }
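
/*
 * Worked example (illustrative): extents merge only when they are both
 * logically and physically contiguous and agree on the unwritten flag:
 *
 *	ex1: ee_block = 100, len = 8, pblock = 5000  (written)
 *	ex2: ee_block = 108, len = 4, pblock = 5008  (written)
 *
 * Here 100 + 8 == 108, 5000 + 8 == 5008 and 8 + 4 <= EXT_INIT_MAX_LEN,
 * so ext4_can_extents_be_merged() returns 1. If ex2 started at pblock
 * 6000, or were unwritten while ex1 is written, it would return 0.
 */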
1757
1758 /*
1759  * This function tries to merge the "ex" extent with the next extent in the tree.
1760  * It always tries to merge towards the right. If you want to merge
1761  * towards the left, pass "ex - 1" as the argument instead of "ex".
1762  * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
1763  * 1 if they got merged.
1764  */
1765 static int ext4_ext_try_to_merge_right(struct inode *inode,
1766                                  struct ext4_ext_path *path,
1767                                  struct ext4_extent *ex)
1768 {
1769         struct ext4_extent_header *eh;
1770         unsigned int depth, len;
1771         int merge_done = 0, unwritten;
1772
1773         depth = ext_depth(inode);
1774         BUG_ON(path[depth].p_hdr == NULL);
1775         eh = path[depth].p_hdr;
1776
1777         while (ex < EXT_LAST_EXTENT(eh)) {
1778                 if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
1779                         break;
1780                 /* merge with next extent! */
1781                 unwritten = ext4_ext_is_unwritten(ex);
1782                 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1783                                 + ext4_ext_get_actual_len(ex + 1));
1784                 if (unwritten)
1785                         ext4_ext_mark_unwritten(ex);
1786
1787                 if (ex + 1 < EXT_LAST_EXTENT(eh)) {
1788                         len = (EXT_LAST_EXTENT(eh) - ex - 1)
1789                                 * sizeof(struct ext4_extent);
1790                         memmove(ex + 1, ex + 2, len);
1791                 }
1792                 le16_add_cpu(&eh->eh_entries, -1);
1793                 merge_done = 1;
1794                 WARN_ON(eh->eh_entries == 0);
1795                 if (!eh->eh_entries)
1796                         EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
1797         }
1798
1799         return merge_done;
1800 }
1801
1802 /*
1803  * This function does a very simple check to see if we can collapse
1804  * an extent tree with a single extent tree leaf block into the inode.
1805  */
1806 static void ext4_ext_try_to_merge_up(handle_t *handle,
1807                                      struct inode *inode,
1808                                      struct ext4_ext_path *path)
1809 {
1810         size_t s;
1811         unsigned max_root = ext4_ext_space_root(inode, 0);
1812         ext4_fsblk_t blk;
1813
1814         if ((path[0].p_depth != 1) ||
1815             (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
1816             (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
1817                 return;
1818
1819         /*
1820          * We need to modify the block allocation bitmap and the block
1821          * group descriptor to release the extent tree block.  If we
1822          * can't get the journal credits, give up.
1823          */
1824         if (ext4_journal_extend(handle, 2))
1825                 return;
1826
1827         /*
1828          * Copy the extent data up to the inode
1829          */
1830         blk = ext4_idx_pblock(path[0].p_idx);
1831         s = le16_to_cpu(path[1].p_hdr->eh_entries) *
1832                 sizeof(struct ext4_extent_idx);
1833         s += sizeof(struct ext4_extent_header);
1834
1835         path[1].p_maxdepth = path[0].p_maxdepth;
1836         memcpy(path[0].p_hdr, path[1].p_hdr, s);
1837         path[0].p_depth = 0;
1838         path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
1839                 (path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
1840         path[0].p_hdr->eh_max = cpu_to_le16(max_root);
1841
1842         brelse(path[1].p_bh);
1843         ext4_free_blocks(handle, inode, NULL, blk, 1,
1844                          EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
1845 }
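
/*
 * Illustrative note: the collapse above only fires for a depth-1 tree
 * whose root holds a single index entry and whose lone leaf has no more
 * entries than ext4_ext_space_root() allows in the inode's i_data area;
 * with the standard 60-byte i_data that is at most 4 extents.
 */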
1846
1847 /*
1848  * This function tries to merge the @ex extent with its neighbours in
1849  * the tree, then tries to collapse a single-leaf tree into the inode.
1850  */
1851 static void ext4_ext_try_to_merge(handle_t *handle,
1852                                   struct inode *inode,
1853                                   struct ext4_ext_path *path,
1854                                   struct ext4_extent *ex) {
1855         struct ext4_extent_header *eh;
1856         unsigned int depth;
1857         int merge_done = 0;
1858
1859         depth = ext_depth(inode);
1860         BUG_ON(path[depth].p_hdr == NULL);
1861         eh = path[depth].p_hdr;
1862
1863         if (ex > EXT_FIRST_EXTENT(eh))
1864                 merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
1865
1866         if (!merge_done)
1867                 (void) ext4_ext_try_to_merge_right(inode, path, ex);
1868
1869         ext4_ext_try_to_merge_up(handle, inode, path);
1870 }
1871
1872 /*
1873  * check if a portion of the "newext" extent overlaps with an
1874  * existing extent.
1875  *
1876  * If there is an overlap discovered, it updates the length of the newext
1877  * such that there will be no overlap, and then returns 1.
1878  * If there is no overlap found, it returns 0.
1879  */
1880 static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
1881                                            struct inode *inode,
1882                                            struct ext4_extent *newext,
1883                                            struct ext4_ext_path *path)
1884 {
1885         ext4_lblk_t b1, b2;
1886         unsigned int depth, len1;
1887         unsigned int ret = 0;
1888
1889         b1 = le32_to_cpu(newext->ee_block);
1890         len1 = ext4_ext_get_actual_len(newext);
1891         depth = ext_depth(inode);
1892         if (!path[depth].p_ext)
1893                 goto out;
1894         b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
1895
1896         /*
1897          * get the next allocated block if the extent in the path
1898          * is before the requested block(s)
1899          */
1900         if (b2 < b1) {
1901                 b2 = ext4_ext_next_allocated_block(path);
1902                 if (b2 == EXT_MAX_BLOCKS)
1903                         goto out;
1904                 b2 = EXT4_LBLK_CMASK(sbi, b2);
1905         }
1906
1907         /* check for wrap through zero on extent logical start block */
1908         if (b1 + len1 < b1) {
1909                 len1 = EXT_MAX_BLOCKS - b1;
1910                 newext->ee_len = cpu_to_le16(len1);
1911                 ret = 1;
1912         }
1913
1914         /* check for overlap */
1915         if (b1 + len1 > b2) {
1916                 newext->ee_len = cpu_to_le16(b2 - b1);
1917                 ret = 1;
1918         }
1919 out:
1920         return ret;
1921 }
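
/*
 * Worked example (illustrative, ignoring bigalloc cluster rounding): if
 * newext covers b1 = 100 with len1 = 50 and the next allocated block is
 * b2 = 120, then b1 + len1 = 150 > 120, so the extent is trimmed to
 * ee_len = b2 - b1 = 20 and the function returns 1; the caller maps
 * only the non-overlapping prefix.
 */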
1922
1923 /*
1924  * ext4_ext_insert_extent:
1925  * tries to merge the requested extent into an existing extent, or
1926  * inserts the requested extent as a new one into the tree,
1927  * creating a new leaf in the no-space case.
1928  */
1929 int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1930                                 struct ext4_ext_path **ppath,
1931                                 struct ext4_extent *newext, int gb_flags)
1932 {
1933         struct ext4_ext_path *path = *ppath;
1934         struct ext4_extent_header *eh;
1935         struct ext4_extent *ex, *fex;
1936         struct ext4_extent *nearex; /* nearest extent */
1937         struct ext4_ext_path *npath = NULL;
1938         int depth, len, err;
1939         ext4_lblk_t next;
1940         int mb_flags = 0, unwritten;
1941
1942         if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
1943                 mb_flags |= EXT4_MB_DELALLOC_RESERVED;
1944         if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
1945                 EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
1946                 return -EIO;
1947         }
1948         depth = ext_depth(inode);
1949         ex = path[depth].p_ext;
1950         eh = path[depth].p_hdr;
1951         if (unlikely(path[depth].p_hdr == NULL)) {
1952                 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1953                 return -EIO;
1954         }
1955
1956         /* try to insert block into found extent and return */
1957         if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) {
1958
1959                 /*
1960                  * Try to see whether we should test the extent to the
1961                  * right of ex, or to the left of ex. This is because
1962                  * ext4_find_extent() can return either the extent on the
1963                  * left or on the right of the searched position. This
1964                  * will make merging more effective.
1965                  */
1966                 if (ex < EXT_LAST_EXTENT(eh) &&
1967                     (le32_to_cpu(ex->ee_block) +
1968                     ext4_ext_get_actual_len(ex) <
1969                     le32_to_cpu(newext->ee_block))) {
1970                         ex += 1;
1971                         goto prepend;
1972                 } else if ((ex > EXT_FIRST_EXTENT(eh)) &&
1973                            (le32_to_cpu(newext->ee_block) +
1974                            ext4_ext_get_actual_len(newext) <
1975                            le32_to_cpu(ex->ee_block)))
1976                         ex -= 1;
1977
1978                 /* Try to append newex to the ex */
1979                 if (ext4_can_extents_be_merged(inode, ex, newext)) {
1980                         ext_debug("append [%d]%d block to %u:[%d]%d"
1981                                   "(from %llu)\n",
1982                                   ext4_ext_is_unwritten(newext),
1983                                   ext4_ext_get_actual_len(newext),
1984                                   le32_to_cpu(ex->ee_block),
1985                                   ext4_ext_is_unwritten(ex),
1986                                   ext4_ext_get_actual_len(ex),
1987                                   ext4_ext_pblock(ex));
1988                         err = ext4_ext_get_access(handle, inode,
1989                                                   path + depth);
1990                         if (err)
1991                                 return err;
1992                         unwritten = ext4_ext_is_unwritten(ex);
1993                         ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1994                                         + ext4_ext_get_actual_len(newext));
1995                         if (unwritten)
1996                                 ext4_ext_mark_unwritten(ex);
1997                         eh = path[depth].p_hdr;
1998                         nearex = ex;
1999                         goto merge;
2000                 }
2001
2002 prepend:
2003                 /* Try to prepend newex to the ex */
2004                 if (ext4_can_extents_be_merged(inode, newext, ex)) {
2005                         ext_debug("prepend %u[%d]%d block to %u:[%d]%d"
2006                                   "(from %llu)\n",
2007                                   le32_to_cpu(newext->ee_block),
2008                                   ext4_ext_is_unwritten(newext),
2009                                   ext4_ext_get_actual_len(newext),
2010                                   le32_to_cpu(ex->ee_block),
2011                                   ext4_ext_is_unwritten(ex),
2012                                   ext4_ext_get_actual_len(ex),
2013                                   ext4_ext_pblock(ex));
2014                         err = ext4_ext_get_access(handle, inode,
2015                                                   path + depth);
2016                         if (err)
2017                                 return err;
2018
2019                         unwritten = ext4_ext_is_unwritten(ex);
2020                         ex->ee_block = newext->ee_block;
2021                         ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
2022                         ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
2023                                         + ext4_ext_get_actual_len(newext));
2024                         if (unwritten)
2025                                 ext4_ext_mark_unwritten(ex);
2026                         eh = path[depth].p_hdr;
2027                         nearex = ex;
2028                         goto merge;
2029                 }
2030         }
2031
2032         depth = ext_depth(inode);
2033         eh = path[depth].p_hdr;
2034         if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
2035                 goto has_space;
2036
2037         /* perhaps the next leaf has space for us? */
2038         fex = EXT_LAST_EXTENT(eh);
2039         next = EXT_MAX_BLOCKS;
2040         if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
2041                 next = ext4_ext_next_leaf_block(path);
2042         if (next != EXT_MAX_BLOCKS) {
2043                 ext_debug("next leaf block - %u\n", next);
2044                 BUG_ON(npath != NULL);
2045                 npath = ext4_find_extent(inode, next, NULL, 0);
2046                 if (IS_ERR(npath))
2047                         return PTR_ERR(npath);
2048                 BUG_ON(npath->p_depth != path->p_depth);
2049                 eh = npath[depth].p_hdr;
2050                 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
2051                         ext_debug("next leaf isn't full(%d)\n",
2052                                   le16_to_cpu(eh->eh_entries));
2053                         path = npath;
2054                         goto has_space;
2055                 }
2056                 ext_debug("next leaf has no free space(%d,%d)\n",
2057                           le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
2058         }
2059
2060         /*
2061          * There is no free space in the found leaf.
2062          * We're going to add a new leaf to the tree.
2063          */
2064         if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
2065                 mb_flags |= EXT4_MB_USE_RESERVED;
2066         err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
2067                                        ppath, newext);
2068         if (err)
2069                 goto cleanup;
2070         depth = ext_depth(inode);
2071         eh = path[depth].p_hdr;
2072
2073 has_space:
2074         nearex = path[depth].p_ext;
2075
2076         err = ext4_ext_get_access(handle, inode, path + depth);
2077         if (err)
2078                 goto cleanup;
2079
2080         if (!nearex) {
2081                 /* there is no extent in this leaf, create first one */
2082                 ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
2083                                 le32_to_cpu(newext->ee_block),
2084                                 ext4_ext_pblock(newext),
2085                                 ext4_ext_is_unwritten(newext),
2086                                 ext4_ext_get_actual_len(newext));
2087                 nearex = EXT_FIRST_EXTENT(eh);
2088         } else {
2089                 if (le32_to_cpu(newext->ee_block)
2090                            > le32_to_cpu(nearex->ee_block)) {
2091                         /* Insert after */
2092                         ext_debug("insert %u:%llu:[%d]%d before: "
2093                                         "nearest %p\n",
2094                                         le32_to_cpu(newext->ee_block),
2095                                         ext4_ext_pblock(newext),
2096                                         ext4_ext_is_unwritten(newext),
2097                                         ext4_ext_get_actual_len(newext),
2098                                         nearex);
2099                         nearex++;
2100                 } else {
2101                         /* Insert before */
2102                         BUG_ON(newext->ee_block == nearex->ee_block);
2103                         ext_debug("insert %u:%llu:[%d]%d after: "
2104                                         "nearest %p\n",
2105                                         le32_to_cpu(newext->ee_block),
2106                                         ext4_ext_pblock(newext),
2107                                         ext4_ext_is_unwritten(newext),
2108                                         ext4_ext_get_actual_len(newext),
2109                                         nearex);
2110                 }
2111                 len = EXT_LAST_EXTENT(eh) - nearex + 1;
2112                 if (len > 0) {
2113                         ext_debug("insert %u:%llu:[%d]%d: "
2114                                         "move %d extents from 0x%p to 0x%p\n",
2115                                         le32_to_cpu(newext->ee_block),
2116                                         ext4_ext_pblock(newext),
2117                                         ext4_ext_is_unwritten(newext),
2118                                         ext4_ext_get_actual_len(newext),
2119                                         len, nearex, nearex + 1);
2120                         memmove(nearex + 1, nearex,
2121                                 len * sizeof(struct ext4_extent));
2122                 }
2123         }
2124
2125         le16_add_cpu(&eh->eh_entries, 1);
2126         path[depth].p_ext = nearex;
2127         nearex->ee_block = newext->ee_block;
2128         ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
2129         nearex->ee_len = newext->ee_len;
2130
2131 merge:
2132         /* try to merge extents */
2133         if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
2134                 ext4_ext_try_to_merge(handle, inode, path, nearex);
2135
2136
2137         /* time to correct all indexes above */
2138         err = ext4_ext_correct_indexes(handle, inode, path);
2139         if (err)
2140                 goto cleanup;
2141
2142         err = ext4_ext_dirty(handle, inode, path + path->p_depth);
2143
2144 cleanup:
2145         ext4_ext_drop_refs(npath);
2146         kfree(npath);
2147         return err;
2148 }
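
/*
 * Illustrative sketch (not part of the build): a typical caller fills
 * in the extent to insert and lets this function handle merging and
 * leaf allocation; map_lblk, new_pblk and alloc_len are placeholder
 * names, and @path is assumed to come from ext4_find_extent():
 *
 *	struct ext4_extent newex;
 *	int err;
 *
 *	newex.ee_block = cpu_to_le32(map_lblk);
 *	ext4_ext_store_pblock(&newex, new_pblk);
 *	newex.ee_len = cpu_to_le16(alloc_len);
 *	err = ext4_ext_insert_extent(handle, inode, &path, &newex, 0);
 */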
2149
2150 static int ext4_fill_fiemap_extents(struct inode *inode,
2151                                     ext4_lblk_t block, ext4_lblk_t num,
2152                                     struct fiemap_extent_info *fieinfo)
2153 {
2154         struct ext4_ext_path *path = NULL;
2155         struct ext4_extent *ex;
2156         struct extent_status es;
2157         ext4_lblk_t next, next_del, start = 0, end = 0;
2158         ext4_lblk_t last = block + num;
2159         int exists, depth = 0, err = 0;
2160         unsigned int flags = 0;
2161         unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
2162
2163         while (block < last && block != EXT_MAX_BLOCKS) {
2164                 num = last - block;
2165                 /* find extent for this block */
2166                 down_read(&EXT4_I(inode)->i_data_sem);
2167
2168                 path = ext4_find_extent(inode, block, &path, 0);
2169                 if (IS_ERR(path)) {
2170                         up_read(&EXT4_I(inode)->i_data_sem);
2171                         err = PTR_ERR(path);
2172                         path = NULL;
2173                         break;
2174                 }
2175
2176                 depth = ext_depth(inode);
2177                 if (unlikely(path[depth].p_hdr == NULL)) {
2178                         up_read(&EXT4_I(inode)->i_data_sem);
2179                         EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2180                         err = -EIO;
2181                         break;
2182                 }
2183                 ex = path[depth].p_ext;
2184                 next = ext4_ext_next_allocated_block(path);
2185
2186                 flags = 0;
2187                 exists = 0;
2188                 if (!ex) {
2189                         /* there is no extent yet, so try to allocate
2190                          * all requested space */
2191                         start = block;
2192                         end = block + num;
2193                 } else if (le32_to_cpu(ex->ee_block) > block) {
2194                         /* need to allocate space before found extent */
2195                         start = block;
2196                         end = le32_to_cpu(ex->ee_block);
2197                         if (block + num < end)
2198                                 end = block + num;
2199                 } else if (block >= le32_to_cpu(ex->ee_block)
2200                                         + ext4_ext_get_actual_len(ex)) {
2201                         /* need to allocate space after found extent */
2202                         start = block;
2203                         end = block + num;
2204                         if (end >= next)
2205                                 end = next;
2206                 } else if (block >= le32_to_cpu(ex->ee_block)) {
2207                         /*
2208                          * some part of requested space is covered
2209                          * by found extent
2210                          */
2211                         start = block;
2212                         end = le32_to_cpu(ex->ee_block)
2213                                 + ext4_ext_get_actual_len(ex);
2214                         if (block + num < end)
2215                                 end = block + num;
2216                         exists = 1;
2217                 } else {
2218                         BUG();
2219                 }
2220                 BUG_ON(end <= start);
2221
2222                 if (!exists) {
2223                         es.es_lblk = start;
2224                         es.es_len = end - start;
2225                         es.es_pblk = 0;
2226                 } else {
2227                         es.es_lblk = le32_to_cpu(ex->ee_block);
2228                         es.es_len = ext4_ext_get_actual_len(ex);
2229                         es.es_pblk = ext4_ext_pblock(ex);
2230                         if (ext4_ext_is_unwritten(ex))
2231                                 flags |= FIEMAP_EXTENT_UNWRITTEN;
2232                 }
2233
2234                 /*
2235                  * Find delayed extent and update es accordingly. We call
2236                  * it even in !exists case to find out whether es is the
2237                  * last existing extent or not.
2238                  */
2239                 next_del = ext4_find_delayed_extent(inode, &es);
2240                 if (!exists && next_del) {
2241                         exists = 1;
2242                         flags |= (FIEMAP_EXTENT_DELALLOC |
2243                                   FIEMAP_EXTENT_UNKNOWN);
2244                 }
2245                 up_read(&EXT4_I(inode)->i_data_sem);
2246
2247                 if (unlikely(es.es_len == 0)) {
2248                         EXT4_ERROR_INODE(inode, "es.es_len == 0");
2249                         err = -EIO;
2250                         break;
2251                 }
2252
2253                 /*
2254                  * This is possible iff next == next_del == EXT_MAX_BLOCKS.
2255                  * We need to check next == EXT_MAX_BLOCKS because it is
2256                  * possible for an extent to carry both unwritten and
2257                  * delayed status: when an extent is delayed allocated and
2258                  * then allocated by fallocate, the status tree will track
2259                  * both of them in a single extent.
2260                  *
2261                  * So we could return an unwritten and delayed extent
2262                  * whose block is equal to 'next'.
2263                  */
2264                 if (next == next_del && next == EXT_MAX_BLOCKS) {
2265                         flags |= FIEMAP_EXTENT_LAST;
2266                         if (unlikely(next_del != EXT_MAX_BLOCKS ||
2267                                      next != EXT_MAX_BLOCKS)) {
2268                                 EXT4_ERROR_INODE(inode,
2269                                                  "next extent == %u, next "
2270                                                  "delalloc extent = %u",
2271                                                  next, next_del);
2272                                 err = -EIO;
2273                                 break;
2274                         }
2275                 }
2276
2277                 if (exists) {
2278                         err = fiemap_fill_next_extent(fieinfo,
2279                                 (__u64)es.es_lblk << blksize_bits,
2280                                 (__u64)es.es_pblk << blksize_bits,
2281                                 (__u64)es.es_len << blksize_bits,
2282                                 flags);
2283                         if (err < 0)
2284                                 break;
2285                         if (err == 1) {
2286                                 err = 0;
2287                                 break;
2288                         }
2289                 }
2290
2291                 block = es.es_lblk + es.es_len;
2292         }
2293
2294         ext4_ext_drop_refs(path);
2295         kfree(path);
2296         return err;
2297 }
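
/*
 * Illustrative note: the loop above walks the file in logical-block
 * order, re-running ext4_find_extent() for each region under
 * i_data_sem, folds in delayed-allocation state via
 * ext4_find_delayed_extent(), and stops when fiemap_fill_next_extent()
 * reports the user buffer full (return value 1) or the last extent has
 * been emitted.
 */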
2298
2299 /*
2300  * ext4_ext_put_gap_in_cache:
2301  * calculate boundaries of the gap that the requested block fits into
2302  * and cache this gap
2303  */
2304 static void
2305 ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
2306                                 ext4_lblk_t block)
2307 {
2308         int depth = ext_depth(inode);
2309         unsigned long len = 0;
2310         ext4_lblk_t lblock = 0;
2311         struct ext4_extent *ex;
2312
2313         ex = path[depth].p_ext;
2314         if (ex == NULL) {
2315                 /*
2316                  * there is no extent yet, so gap is [0;-] and we
2317                  * don't cache it
2318                  */
2319                 ext_debug("cache gap(whole file):");
2320         } else if (block < le32_to_cpu(ex->ee_block)) {
2321                 lblock = block;
2322                 len = le32_to_cpu(ex->ee_block) - block;
2323                 ext_debug("cache gap(before): %u [%u:%u]",
2324                                 block,
2325                                 le32_to_cpu(ex->ee_block),
2326                                  ext4_ext_get_actual_len(ex));
2327                 if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
2328                         ext4_es_insert_extent(inode, lblock, len, ~0,
2329                                               EXTENT_STATUS_HOLE);
2330         } else if (block >= le32_to_cpu(ex->ee_block)
2331                         + ext4_ext_get_actual_len(ex)) {
2332                 ext4_lblk_t next;
2333                 lblock = le32_to_cpu(ex->ee_block)
2334                         + ext4_ext_get_actual_len(ex);
2335
2336                 next = ext4_ext_next_allocated_block(path);
2337                 ext_debug("cache gap(after): [%u:%u] %u",
2338                                 le32_to_cpu(ex->ee_block),
2339                                 ext4_ext_get_actual_len(ex),
2340                                 block);
2341                 BUG_ON(next == lblock);
2342                 len = next - lblock;
2343                 if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
2344                         ext4_es_insert_extent(inode, lblock, len, ~0,
2345                                               EXTENT_STATUS_HOLE);
2346         } else {
2347                 BUG();
2348         }
2349
2350         ext_debug(" -> %u:%lu\n", lblock, len);
2351 }
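
/*
 * Worked example (illustrative): with extents [0..9] and [20..29] and a
 * lookup for block 12, the "gap after" branch fires: lblock = 10 (the
 * end of the left extent) and next = 20, so the hole [10, len 10] is
 * inserted into the extent status tree as EXTENT_STATUS_HOLE, provided
 * no delalloc range overlaps it.
 */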
2352
2353 /*
2354  * ext4_ext_rm_idx:
2355  * removes index from the index block.
2356  */
2357 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
2358                         struct ext4_ext_path *path, int depth)
2359 {
2360         int err;
2361         ext4_fsblk_t leaf;
2362
2363         /* free index block */
2364         depth--;
2365         path = path + depth;
2366         leaf = ext4_idx_pblock(path->p_idx);
2367         if (unlikely(path->p_hdr->eh_entries == 0)) {
2368                 EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
2369                 return -EIO;
2370         }
2371         err = ext4_ext_get_access(handle, inode, path);
2372         if (err)
2373                 return err;
2374
2375         if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
2376                 int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
2377                 len *= sizeof(struct ext4_extent_idx);
2378                 memmove(path->p_idx, path->p_idx + 1, len);
2379         }
2380
2381         le16_add_cpu(&path->p_hdr->eh_entries, -1);
2382         err = ext4_ext_dirty(handle, inode, path);
2383         if (err)
2384                 return err;
2385         ext_debug("index is empty, remove it, free block %llu\n", leaf);
2386         trace_ext4_ext_rm_idx(inode, leaf);
2387
2388         ext4_free_blocks(handle, inode, NULL, leaf, 1,
2389                          EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
2390
2391         while (--depth >= 0) {
2392                 if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
2393                         break;
2394                 path--;
2395                 err = ext4_ext_get_access(handle, inode, path);
2396                 if (err)
2397                         break;
2398                 path->p_idx->ei_block = (path+1)->p_idx->ei_block;
2399                 err = ext4_ext_dirty(handle, inode, path);
2400                 if (err)
2401                         break;
2402         }
2403         return err;
2404 }
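
/*
 * Illustrative note: after the index entry is removed and the now-empty
 * block is freed, the loop above propagates the change upwards: while
 * the removed entry was the leftmost one at its level, the parent's
 * index entry is rewritten to the new leftmost child's ei_block, level
 * by level towards the root.
 */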
2405
2406 /*
2407  * ext4_ext_calc_credits_for_single_extent:
2408  * This routine returns the maximum number of credits needed to
2409  * insert an extent into the extent tree.
2410  * When passing the actual path, the caller should calculate the
2411  * credits under i_data_sem.
2412  */
2413 int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
2414                                                 struct ext4_ext_path *path)
2415 {
2416         if (path) {
2417                 int depth = ext_depth(inode);
2418                 int ret = 0;
2419
2420                 /* perhaps there is space in the leaf? */
2421                 if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2422                                 < le16_to_cpu(path[depth].p_hdr->eh_max)) {
2423
2424                         /*
2425                          *  There is some space in the leaf; no need
2426                          *  to account for a leaf block credit.
2427                          *
2428                          *  Bitmaps, block group descriptor blocks,
2429                          *  and other metadata blocks still need to
2430                          *  be accounted for.
2431                          */
2432                         /* 1 bitmap, 1 block group descriptor */
2433                         ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
2434                         return ret;
2435                 }
2436         }
2437
2438         return ext4_chunk_trans_blocks(inode, nrblocks);
2439 }
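
/*
 * Worked arithmetic (illustrative): when the leaf still has room, the
 * cost is 2 + EXT4_META_TRANS_BLOCKS(sb) credits (bitmap, group
 * descriptor, plus the usual metadata overhead); a full leaf falls back
 * to ext4_chunk_trans_blocks(), which also budgets for a possible split
 * at every level of the tree.
 */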
2440
2441 /*
2442  * How many index/leaf blocks need to be changed/allocated to add @extents extents?
2443  *
2444  * If we add a single extent, then in the worst case each tree level's
2445  * index/leaf needs to be changed in case the tree splits.
2446  *
2447  * If more extents are inserted, they could cause the whole tree split more
2448  * than once, but this is really rare.
2449  */
2450 int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
2451 {
2452         int index;
2453         int depth;
2454
2455         /* If we are converting the inline data, only one is needed here. */
2456         if (ext4_has_inline_data(inode))
2457                 return 1;
2458
2459         depth = ext_depth(inode);
2460
2461         if (extents <= 1)
2462                 index = depth * 2;
2463         else
2464                 index = depth * 3;
2465
2466         return index;
2467 }
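
/*
 * Worked example (illustrative): for a depth-2 tree, inserting a single
 * extent is budgeted at 2 * 2 = 4 modified index/leaf blocks (one split
 * per level), several extents at 3 * 2 = 6; an inline-data inode
 * short-circuits to 1.
 */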
2468
2469 static inline int get_default_free_blocks_flags(struct inode *inode)
2470 {
2471         if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2472                 return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
2473         else if (ext4_should_journal_data(inode))
2474                 return EXT4_FREE_BLOCKS_FORGET;
2475         return 0;
2476 }
2477
2478 static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2479                               struct ext4_extent *ex,
2480                               long long *partial_cluster,
2481                               ext4_lblk_t from, ext4_lblk_t to)
2482 {
2483         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2484         unsigned short ee_len =  ext4_ext_get_actual_len(ex);
2485         ext4_fsblk_t pblk;
2486         int flags = get_default_free_blocks_flags(inode);
2487
2488         /*
2489          * For bigalloc file systems, we never free a partial cluster
2490          * at the beginning of the extent.  Instead, we make a note
2491          * that we tried freeing the cluster, and check to see if we
2492          * need to free it on a subsequent call to ext4_remove_blocks,
2493          * or at the end of the ext4_truncate() operation.
2494          */
2495         flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
2496
2497         trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
2498         /*
2499          * If we have a partial cluster, and it's different from the
2500          * cluster of the last block, we need to explicitly free the
2501          * partial cluster here.
2502          */
2503         pblk = ext4_ext_pblock(ex) + ee_len - 1;
2504         if ((*partial_cluster > 0) &&
2505             (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
2506                 ext4_free_blocks(handle, inode, NULL,
2507                                  EXT4_C2B(sbi, *partial_cluster),
2508                                  sbi->s_cluster_ratio, flags);
2509                 *partial_cluster = 0;
2510         }
2511
2512 #ifdef EXTENTS_STATS
2513         {
2514                 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2515                 spin_lock(&sbi->s_ext_stats_lock);
2516                 sbi->s_ext_blocks += ee_len;
2517                 sbi->s_ext_extents++;
2518                 if (ee_len < sbi->s_ext_min)
2519                         sbi->s_ext_min = ee_len;
2520                 if (ee_len > sbi->s_ext_max)
2521                         sbi->s_ext_max = ee_len;
2522                 if (ext_depth(inode) > sbi->s_depth_max)
2523                         sbi->s_depth_max = ext_depth(inode);
2524                 spin_unlock(&sbi->s_ext_stats_lock);
2525         }
2526 #endif
2527         if (from >= le32_to_cpu(ex->ee_block)
2528             && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
2529                 /* tail removal */
2530                 ext4_lblk_t num;
2531                 unsigned int unaligned;
2532
2533                 num = le32_to_cpu(ex->ee_block) + ee_len - from;
2534                 pblk = ext4_ext_pblock(ex) + ee_len - num;
2535                 /*
2536                  * Usually we want to free the partial cluster at the end of
2537                  * the extent, except when the cluster is still used by
2538                  * another extent (partial_cluster is negative).
2539                  */
2540                 if (*partial_cluster < 0 &&
2541                     -(*partial_cluster) == EXT4_B2C(sbi, pblk + num - 1))
2542                         flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;
2543
2544                 ext_debug("free last %u blocks starting %llu partial %lld\n",
2545                           num, pblk, *partial_cluster);
2546                 ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
2547                 /*
2548                  * If the block range to be freed didn't start at the
2549                  * beginning of a cluster, and we removed the entire
2550                  * extent and the cluster is not used by any other extent,
2551                  * save the partial cluster here, since we might need to
2552                  * delete it if we determine that the truncate operation has
2553                  * removed all of the blocks in the cluster.
2554                  *
2555                  * On the other hand, if we did not manage to free the whole
2556                  * extent, we have to mark the cluster as used (store negative
2557                  * cluster number in partial_cluster).
2558                  */
2559                 unaligned = EXT4_PBLK_COFF(sbi, pblk);
2560                 if (unaligned && (ee_len == num) &&
2561                     (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk))))
2562                         *partial_cluster = EXT4_B2C(sbi, pblk);
2563                 else if (unaligned)
2564                         *partial_cluster = -((long long)EXT4_B2C(sbi, pblk));
2565                 else if (*partial_cluster > 0)
2566                         *partial_cluster = 0;
2567         } else
2568                 ext4_error(sbi->s_sb, "strange request: removal(2) "
2569                            "%u-%u from %u:%u\n",
2570                            from, to, le32_to_cpu(ex->ee_block), ee_len);
2571         return 0;
2572 }
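
/*
 * Worked example (illustrative, bigalloc with a cluster ratio of 4):
 * cluster 25 covers physical blocks 100..103. Freeing blocks 102..107
 * frees cluster 26 (104..107) outright, but only the tail of cluster
 * 25, so *partial_cluster records 25 instead of freeing it at once; a
 * later call, or the end of the truncate, frees it when no other extent
 * shares it, while a negative value marks the cluster as still in use.
 */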
2573
2574
2575 /*
2576  * ext4_ext_rm_leaf() removes the extents associated with the
2577  * blocks appearing between "start" and "end", and splits the extents
2578  * if "start" and "end" appear in the same extent.
2579  *
2580  * @handle: The journal handle
2581  * @inode:  The files inode
2582  * @path:   The path to the leaf
2583  * @partial_cluster: The cluster which we'll have to free if all extents
2584  *                   has been released from it. It gets negative in case
2585  *                   that the cluster is still used.
2586  * @start:  The first block to remove
2587  * @end:   The last block to remove
2588  */
2589 static int
2590 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2591                  struct ext4_ext_path *path,
2592                  long long *partial_cluster,
2593                  ext4_lblk_t start, ext4_lblk_t end)
2594 {
2595         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2596         int err = 0, correct_index = 0;
2597         int depth = ext_depth(inode), credits;
2598         struct ext4_extent_header *eh;
2599         ext4_lblk_t a, b;
2600         unsigned num;
2601         ext4_lblk_t ex_ee_block;
2602         unsigned short ex_ee_len;
2603         unsigned unwritten = 0;
2604         struct ext4_extent *ex;
2605         ext4_fsblk_t pblk;
2606
2607         /* the header must be checked already in ext4_ext_remove_space() */
2608         ext_debug("truncate since %u in leaf to %u\n", start, end);
2609         if (!path[depth].p_hdr)
2610                 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2611         eh = path[depth].p_hdr;
2612         if (unlikely(path[depth].p_hdr == NULL)) {
2613                 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2614                 return -EIO;
2615         }
2616         /* find where to start removing */
2617         ex = path[depth].p_ext;
2618         if (!ex)
2619                 ex = EXT_LAST_EXTENT(eh);
2620
2621         ex_ee_block = le32_to_cpu(ex->ee_block);
2622         ex_ee_len = ext4_ext_get_actual_len(ex);
2623
2624         /*
2625          * If we're starting with an extent other than the last one in the
2626          * node, we need to see if it shares a cluster with the extent to
2627          * the right (towards the end of the file). If its leftmost cluster
2628          * is this extent's rightmost cluster and it is not cluster aligned,
2629          * we'll mark it as a partial that is not to be deallocated.
2630          */
2631
2632         if (ex != EXT_LAST_EXTENT(eh)) {
2633                 ext4_fsblk_t current_pblk, right_pblk;
2634                 long long current_cluster, right_cluster;
2635
2636                 current_pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
2637                 current_cluster = (long long)EXT4_B2C(sbi, current_pblk);
2638                 right_pblk = ext4_ext_pblock(ex + 1);
2639                 right_cluster = (long long)EXT4_B2C(sbi, right_pblk);
2640                 if (current_cluster == right_cluster &&
2641                         EXT4_PBLK_COFF(sbi, right_pblk))
2642                         *partial_cluster = -right_cluster;
2643         }
2644
2645         trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
2646
2647         while (ex >= EXT_FIRST_EXTENT(eh) &&
2648                         ex_ee_block + ex_ee_len > start) {
2649
2650                 if (ext4_ext_is_unwritten(ex))
2651                         unwritten = 1;
2652                 else
2653                         unwritten = 0;
2654
2655                 ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
2656                           unwritten, ex_ee_len);
2657                 path[depth].p_ext = ex;
2658
2659                 a = ex_ee_block > start ? ex_ee_block : start;
2660                 b = ex_ee_block+ex_ee_len - 1 < end ?
2661                         ex_ee_block+ex_ee_len - 1 : end;
2662
2663                 ext_debug("  border %u:%u\n", a, b);
2664
2665                 /* If this extent is beyond the end of the hole, skip it */
2666                 if (end < ex_ee_block) {
2667                         /*
2668                          * We're going to skip this extent and move to another,
2669                          * so if this extent is not cluster aligned we have
2670                          * to mark the current cluster as used to avoid
2671                          * accidentally freeing it later on
2672                          */
2673                         pblk = ext4_ext_pblock(ex);
2674                         if (EXT4_PBLK_COFF(sbi, pblk))
2675                                 *partial_cluster =
2676                                         -((long long)EXT4_B2C(sbi, pblk));
2677                         ex--;
2678                         ex_ee_block = le32_to_cpu(ex->ee_block);
2679                         ex_ee_len = ext4_ext_get_actual_len(ex);
2680                         continue;
2681                 } else if (b != ex_ee_block + ex_ee_len - 1) {
2682                         EXT4_ERROR_INODE(inode,
2683                                          "can not handle truncate %u:%u "
2684                                          "on extent %u:%u",
2685                                          start, end, ex_ee_block,
2686                                          ex_ee_block + ex_ee_len - 1);
2687                         err = -EIO;
2688                         goto out;
2689                 } else if (a != ex_ee_block) {
2690                         /* remove tail of the extent */
2691                         num = a - ex_ee_block;
2692                 } else {
2693                         /* remove whole extent: excellent! */
2694                         num = 0;
2695                 }
2696                 /*
2697                  * 3 for leaf, sb, and inode plus 2 (bmap and group
2698                  * descriptor) for each block group; assume two block
2699                  * groups plus ex_ee_len/blocks_per_block_group for
2700                  * the worst case
2701                  */
2702                 credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2703                 if (ex == EXT_FIRST_EXTENT(eh)) {
2704                         correct_index = 1;
2705                         credits += (ext_depth(inode)) + 1;
2706                 }
2707                 credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
2708
2709                 err = ext4_ext_truncate_extend_restart(handle, inode, credits);
2710                 if (err)
2711                         goto out;
2712
2713                 err = ext4_ext_get_access(handle, inode, path + depth);
2714                 if (err)
2715                         goto out;
2716
2717                 err = ext4_remove_blocks(handle, inode, ex, partial_cluster,
2718                                          a, b);
2719                 if (err)
2720                         goto out;
2721
2722                 if (num == 0)
2723                         /* this extent is removed; mark slot entirely unused */
2724                         ext4_ext_store_pblock(ex, 0);
2725
2726                 ex->ee_len = cpu_to_le16(num);
2727                 /*
2728                  * Do not mark unwritten if all the blocks in the
2729                  * extent have been removed.
2730                  */
2731                 if (unwritten && num)
2732                         ext4_ext_mark_unwritten(ex);
2733                 /*
2734                  * If the extent was completely released,
2735                  * we need to remove it from the leaf
2736                  */
2737                 if (num == 0) {
2738                         if (end != EXT_MAX_BLOCKS - 1) {
2739                                 /*
2740                                  * For hole punching, we need to scoot all the
2741                                  * extents up when an extent is removed so that
2742                                  * we don't have blank extents in the middle
2743                                  */
2744                                 memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
2745                                         sizeof(struct ext4_extent));
2746
2747                                 /* Now get rid of the one at the end */
2748                                 memset(EXT_LAST_EXTENT(eh), 0,
2749                                         sizeof(struct ext4_extent));
2750                         }
2751                         le16_add_cpu(&eh->eh_entries, -1);
2752                 } else if (*partial_cluster > 0)
2753                         *partial_cluster = 0;
2754
2755                 err = ext4_ext_dirty(handle, inode, path + depth);
2756                 if (err)
2757                         goto out;
2758
2759                 ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
2760                                 ext4_ext_pblock(ex));
2761                 ex--;
2762                 ex_ee_block = le32_to_cpu(ex->ee_block);
2763                 ex_ee_len = ext4_ext_get_actual_len(ex);
2764         }
2765
2766         if (correct_index && eh->eh_entries)
2767                 err = ext4_ext_correct_indexes(handle, inode, path);
2768
2769         /*
2770          * If there's a partial cluster and at least one extent remains in
2771          * the leaf, free the partial cluster if it isn't shared with the
2772          * current extent.  If there's a partial cluster and no extents
2773          * remain in the leaf, it can't be freed here.  It can only be
2774          * freed when it's possible to determine if it's not shared with
2775          * any other extent - when the next leaf is processed or when space
2776          * removal is complete.
2777          */
2778         if (*partial_cluster > 0 && eh->eh_entries &&
2779             (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) !=
2780              *partial_cluster)) {
2781                 int flags = get_default_free_blocks_flags(inode);
2782
2783                 ext4_free_blocks(handle, inode, NULL,
2784                                  EXT4_C2B(sbi, *partial_cluster),
2785                                  sbi->s_cluster_ratio, flags);
2786                 *partial_cluster = 0;
2787         }
2788
2789         /* if this leaf is free, then we should
2790          * remove it from the index block above */
2791         if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2792                 err = ext4_ext_rm_idx(handle, inode, path, depth);
2793
2794 out:
2795         return err;
2796 }
2797
2798 /*
2799  * ext4_ext_more_to_rm:
2800  * returns 1 if the current index has to be freed (even partially)
2801  */
2802 static int
2803 ext4_ext_more_to_rm(struct ext4_ext_path *path)
2804 {
2805         BUG_ON(path->p_idx == NULL);
2806
2807         if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2808                 return 0;
2809
2810         /*
2811          * if a truncate at a deeper level happened, it wasn't partial,
2812          * so we have to consider the current index for truncation
2813          */
2814         if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2815                 return 0;
2816         return 1;
2817 }
2818
2819 int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
2820                           ext4_lblk_t end)
2821 {
2822         struct super_block *sb = inode->i_sb;
2823         int depth = ext_depth(inode);
2824         struct ext4_ext_path *path = NULL;
2825         long long partial_cluster = 0;
2826         handle_t *handle;
2827         int i = 0, err = 0;
2828
2829         ext_debug("truncate since %u to %u\n", start, end);
2830
2831         /* probably the first extent we free will be the last in its block */
2832         handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1);
2833         if (IS_ERR(handle))
2834                 return PTR_ERR(handle);
2835
2836 again:
2837         trace_ext4_ext_remove_space(inode, start, end, depth);
2838
2839         /*
2840          * Check if we are removing extents inside the extent tree. If that
2841          * is the case, we are going to punch a hole inside the extent tree
2842          * so we have to check whether we need to split the extent covering
2843          * the last block to remove so we can easily remove the part of it
2844          * in ext4_ext_rm_leaf().
2845          */
2846         if (end < EXT_MAX_BLOCKS - 1) {
2847                 struct ext4_extent *ex;
2848                 ext4_lblk_t ee_block;
2849
2850                 /* find extent for this block */
2851                 path = ext4_find_extent(inode, end, NULL, EXT4_EX_NOCACHE);
2852                 if (IS_ERR(path)) {
2853                         ext4_journal_stop(handle);
2854                         return PTR_ERR(path);
2855                 }
2856                 depth = ext_depth(inode);
2857                 /* A leaf may be missing only if the inode has no blocks at all */
2858                 ex = path[depth].p_ext;
2859                 if (!ex) {
2860                         if (depth) {
2861                                 EXT4_ERROR_INODE(inode,
2862                                                  "path[%d].p_hdr == NULL",
2863                                                  depth);
2864                                 err = -EIO;
2865                         }
2866                         goto out;
2867                 }
2868
2869                 ee_block = le32_to_cpu(ex->ee_block);
2870
2871                 /*
2872                  * See if the last block is inside the extent, if so split
2873                  * the extent at 'end' block so we can easily remove the
2874                  * tail of the first part of the split extent in
2875                  * ext4_ext_rm_leaf().
2876                  */
2877                 if (end >= ee_block &&
2878                     end < ee_block + ext4_ext_get_actual_len(ex) - 1) {
2879                         /*
2880                          * Split the extent in two so that 'end' is the last
2881                          * block in the first new extent. Also we should not
2882                          * fail removing space due to ENOSPC so try to use
2883                          * reserved block if that happens.
2884                          */
2885                         err = ext4_force_split_extent_at(handle, inode, &path,
2886                                                          end + 1, 1);
2887                         if (err < 0)
2888                                 goto out;
2889                 }
2890         }
2891         /*
2892          * We start scanning from the right side, freeing all the blocks
2893          * after i_size and walking into the tree depth-wise.
2894          */
2895         depth = ext_depth(inode);
2896         if (path) {
2897                 int k = i = depth;
2898                 while (--k > 0)
2899                         path[k].p_block =
2900                                 le16_to_cpu(path[k].p_hdr->eh_entries)+1;
2901         } else {
2902                 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
2903                                GFP_NOFS);
2904                 if (path == NULL) {
2905                         ext4_journal_stop(handle);
2906                         return -ENOMEM;
2907                 }
2908                 path[0].p_maxdepth = path[0].p_depth = depth;
2909                 path[0].p_hdr = ext_inode_hdr(inode);
2910                 i = 0;
2911
2912                 if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) {
2913                         err = -EIO;
2914                         goto out;
2915                 }
2916         }
2917         err = 0;
2918
2919         while (i >= 0 && err == 0) {
2920                 if (i == depth) {
2921                         /* this is leaf block */
2922                         err = ext4_ext_rm_leaf(handle, inode, path,
2923                                                &partial_cluster, start,
2924                                                end);
2925                         /* root level has p_bh == NULL, brelse() eats this */
2926                         brelse(path[i].p_bh);
2927                         path[i].p_bh = NULL;
2928                         i--;
2929                         continue;
2930                 }
2931
2932                 /* this is index block */
2933                 if (!path[i].p_hdr) {
2934                         ext_debug("initialize header\n");
2935                         path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2936                 }
2937
2938                 if (!path[i].p_idx) {
2939                         /* this level hasn't been touched yet */
2940                         path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2941                         path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2942                         ext_debug("init index ptr: hdr 0x%p, num %d\n",
2943                                   path[i].p_hdr,
2944                                   le16_to_cpu(path[i].p_hdr->eh_entries));
2945                 } else {
2946                         /* we were already here; move to the next index */
2947                         path[i].p_idx--;
2948                 }
2949
2950                 ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
2951                                 i, EXT_FIRST_INDEX(path[i].p_hdr),
2952                                 path[i].p_idx);
2953                 if (ext4_ext_more_to_rm(path + i)) {
2954                         struct buffer_head *bh;
2955                         /* go to the next level */
2956                         ext_debug("move to level %d (block %llu)\n",
2957                                   i + 1, ext4_idx_pblock(path[i].p_idx));
2958                         memset(path + i + 1, 0, sizeof(*path));
2959                         bh = read_extent_tree_block(inode,
2960                                 ext4_idx_pblock(path[i].p_idx), depth - i - 1,
2961                                 EXT4_EX_NOCACHE);
2962                         if (IS_ERR(bh)) {
2963                                 /* should we reset i_size? */
2964                                 err = PTR_ERR(bh);
2965                                 break;
2966                         }
2967                         /* Yield here to deal with large extent trees.
2968                          * Should be a no-op if we did IO above. */
2969                         cond_resched();
2970                         if (WARN_ON(i + 1 > depth)) {
2971                                 err = -EIO;
2972                                 break;
2973                         }
2974                         path[i + 1].p_bh = bh;
2975
2976                         /* save the actual number of indexes since this
2977                          * number changes at the next iteration */
2978                         path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
2979                         i++;
2980                 } else {
2981                         /* we finished processing this index, go up */
2982                         if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2983                                 /* index is empty, remove it;
2984                                  * the handle must already have been prepared
2985                                  * when the leaf was truncated */
2986                                 err = ext4_ext_rm_idx(handle, inode, path, i);
2987                         }
2988                         /* root level has p_bh == NULL, brelse() eats this */
2989                         brelse(path[i].p_bh);
2990                         path[i].p_bh = NULL;
2991                         i--;
2992                         ext_debug("return to level %d\n", i);
2993                 }
2994         }
2995
2996         trace_ext4_ext_remove_space_done(inode, start, end, depth,
2997                         partial_cluster, path->p_hdr->eh_entries);
2998
2999         /* If we still have something in the partial cluster and we have removed
3000          * even the first extent, then we should free the blocks in the partial
3001          * cluster as well. */
3002         if (partial_cluster > 0 && path->p_hdr->eh_entries == 0) {
3003                 int flags = get_default_free_blocks_flags(inode);
3004
3005                 ext4_free_blocks(handle, inode, NULL,
3006                                  EXT4_C2B(EXT4_SB(sb), partial_cluster),
3007                                  EXT4_SB(sb)->s_cluster_ratio, flags);
3008                 partial_cluster = 0;
3009         }
3010
3011         /* TODO: flexible tree reduction should be here */
3012         if (path->p_hdr->eh_entries == 0) {
3013                 /*
3014                  * truncate to zero freed all the tree,
3015                  * so we need to correct eh_depth
3016                  */
3017                 err = ext4_ext_get_access(handle, inode, path);
3018                 if (err == 0) {
3019                         ext_inode_hdr(inode)->eh_depth = 0;
3020                         ext_inode_hdr(inode)->eh_max =
3021                                 cpu_to_le16(ext4_ext_space_root(inode, 0));
3022                         err = ext4_ext_dirty(handle, inode, path);
3023                 }
3024         }
3025 out:
3026         ext4_ext_drop_refs(path);
3027         kfree(path);
3028         path = NULL;
3029         if (err == -EAGAIN)
3030                 goto again;
3031         ext4_journal_stop(handle);
3032
3033         return err;
3034 }
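/*
 * Note: callers pass end = EXT_MAX_BLOCKS - 1 for a full truncate, while
 * hole punching passes a bounded [start, end] range, which is why the
 * extent-split logic above only runs when end < EXT_MAX_BLOCKS - 1.
 */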
3035
3036 /*
3037  * called at mount time
3038  */
3039 void ext4_ext_init(struct super_block *sb)
3040 {
3041         /*
3042          * possible initialization would be here
3043          */
3044
3045         if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
3046 #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
3047                 printk(KERN_INFO "EXT4-fs: file extents enabled"
3048 #ifdef AGGRESSIVE_TEST
3049                        ", aggressive tests"
3050 #endif
3051 #ifdef CHECK_BINSEARCH
3052                        ", check binsearch"
3053 #endif
3054 #ifdef EXTENTS_STATS
3055                        ", stats"
3056 #endif
3057                        "\n");
3058 #endif
3059 #ifdef EXTENTS_STATS
3060                 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
3061                 EXT4_SB(sb)->s_ext_min = 1 << 30;
3062                 EXT4_SB(sb)->s_ext_max = 0;
3063 #endif
3064         }
3065 }
3066
3067 /*
3068  * called at umount time
3069  */
3070 void ext4_ext_release(struct super_block *sb)
3071 {
3072         if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
3073                 return;
3074
3075 #ifdef EXTENTS_STATS
3076         if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
3077                 struct ext4_sb_info *sbi = EXT4_SB(sb);
3078                 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
3079                         sbi->s_ext_blocks, sbi->s_ext_extents,
3080                         sbi->s_ext_blocks / sbi->s_ext_extents);
3081                 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
3082                         sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
3083         }
3084 #endif
3085 }
3086
3087 static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
3088 {
3089         ext4_lblk_t  ee_block;
3090         ext4_fsblk_t ee_pblock;
3091         unsigned int ee_len;
3092
3093         ee_block  = le32_to_cpu(ex->ee_block);
3094         ee_len    = ext4_ext_get_actual_len(ex);
3095         ee_pblock = ext4_ext_pblock(ex);
3096
3097         if (ee_len == 0)
3098                 return 0;
3099
3100         return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
3101                                      EXTENT_STATUS_WRITTEN);
3102 }
3103
3104 /* FIXME!! we need to try to merge to left or right after zero-out  */
3105 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
3106 {
3107         ext4_fsblk_t ee_pblock;
3108         unsigned int ee_len;
3109         int ret;
3110
3111         ee_len    = ext4_ext_get_actual_len(ex);
3112         ee_pblock = ext4_ext_pblock(ex);
3113
3114         ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
3115         if (ret > 0)
3116                 ret = 0;
3117
3118         return ret;
3119 }
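/*
 * Illustration (assumed numbers): for an extent with pblk 1000 and
 * length 5, ext4_ext_zeroout() zeroes physical blocks 1000-1004 on disk
 * via sb_issue_zeroout(); logical block numbers play no role here.
 */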
3120
3121 /*
3122  * ext4_split_extent_at() splits an extent at given block.
3123  *
3124  * @handle: the journal handle
3125  * @inode: the file inode
3126  * @path: the path to the extent
3127  * @split: the logical block where the extent is split.
3128  * @split_flag: indicates whether the extent can be zeroed out if the split
3129  *              fails, and the states (initialized or unwritten) of the new extents.
3130  * @flags: flags used when inserting the new extent into the extent tree.
3131  *
3132  *
3133  * Splits extent [a, b] into two extents [a, @split) and [@split, b], whose
3134  * states are determined by @split_flag.
3135  *
3136  * There are two cases:
3137  *  a> the extent is split into two extents.
3138  *  b> no split is needed, and the extent is just marked.
3139  *
3140  * return 0 on success.
3141  */
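/*
 * Worked example (illustrative values only): splitting an extent
 * covering logical blocks [100, 200) at @split = 150 shrinks the
 * original extent to [100, 150) and inserts a new extent [150, 200)
 * whose physical start is ext4_ext_pblock(ex) + 50; the
 * EXT4_EXT_MARK_UNWRIT1/UNWRIT2 flags decide which halves, if any,
 * are marked unwritten.
 */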
3142 static int ext4_split_extent_at(handle_t *handle,
3143                              struct inode *inode,
3144                              struct ext4_ext_path **ppath,
3145                              ext4_lblk_t split,
3146                              int split_flag,
3147                              int flags)
3148 {
3149         struct ext4_ext_path *path = *ppath;
3150         ext4_fsblk_t newblock;
3151         ext4_lblk_t ee_block;
3152         struct ext4_extent *ex, newex, orig_ex, zero_ex;
3153         struct ext4_extent *ex2 = NULL;
3154         unsigned int ee_len, depth;
3155         int err = 0;
3156
3157         BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
3158                (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
3159
3160         ext_debug("ext4_split_extent_at: inode %lu, logical "
3161                 "block %llu\n", inode->i_ino, (unsigned long long)split);
3162
3163         ext4_ext_show_leaf(inode, path);
3164
3165         depth = ext_depth(inode);
3166         ex = path[depth].p_ext;
3167         ee_block = le32_to_cpu(ex->ee_block);
3168         ee_len = ext4_ext_get_actual_len(ex);
3169         newblock = split - ee_block + ext4_ext_pblock(ex);
3170
3171         BUG_ON(split < ee_block || split >= (ee_block + ee_len));
3172         BUG_ON(!ext4_ext_is_unwritten(ex) &&
3173                split_flag & (EXT4_EXT_MAY_ZEROOUT |
3174                              EXT4_EXT_MARK_UNWRIT1 |
3175                              EXT4_EXT_MARK_UNWRIT2));
3176
3177         err = ext4_ext_get_access(handle, inode, path + depth);
3178         if (err)
3179                 goto out;
3180
3181         if (split == ee_block) {
3182                 /*
3183                  * case b: block @split is the block that the extent begins with
3184                  * then we just change the state of the extent, and splitting
3185                  * is not needed.
3186                  */
3187                 if (split_flag & EXT4_EXT_MARK_UNWRIT2)
3188                         ext4_ext_mark_unwritten(ex);
3189                 else
3190                         ext4_ext_mark_initialized(ex);
3191
3192                 if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
3193                         ext4_ext_try_to_merge(handle, inode, path, ex);
3194
3195                 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3196                 goto out;
3197         }
3198
3199         /* case a */
3200         memcpy(&orig_ex, ex, sizeof(orig_ex));
3201         ex->ee_len = cpu_to_le16(split - ee_block);
3202         if (split_flag & EXT4_EXT_MARK_UNWRIT1)
3203                 ext4_ext_mark_unwritten(ex);
3204
3205         /*
3206          * the path may lead to a new leaf, not to the original leaf
3207          * any more, after ext4_ext_insert_extent() returns,
3208          */
3209         err = ext4_ext_dirty(handle, inode, path + depth);
3210         if (err)
3211                 goto fix_extent_len;
3212
3213         ex2 = &newex;
3214         ex2->ee_block = cpu_to_le32(split);
3215         ex2->ee_len   = cpu_to_le16(ee_len - (split - ee_block));
3216         ext4_ext_store_pblock(ex2, newblock);
3217         if (split_flag & EXT4_EXT_MARK_UNWRIT2)
3218                 ext4_ext_mark_unwritten(ex2);
3219
3220         err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
3221         if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3222                 if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
3223                         if (split_flag & EXT4_EXT_DATA_VALID1) {
3224                                 err = ext4_ext_zeroout(inode, ex2);
3225                                 zero_ex.ee_block = ex2->ee_block;
3226                                 zero_ex.ee_len = cpu_to_le16(
3227                                                 ext4_ext_get_actual_len(ex2));
3228                                 ext4_ext_store_pblock(&zero_ex,
3229                                                       ext4_ext_pblock(ex2));
3230                         } else {
3231                                 err = ext4_ext_zeroout(inode, ex);
3232                                 zero_ex.ee_block = ex->ee_block;
3233                                 zero_ex.ee_len = cpu_to_le16(
3234                                                 ext4_ext_get_actual_len(ex));
3235                                 ext4_ext_store_pblock(&zero_ex,
3236                                                       ext4_ext_pblock(ex));
3237                         }
3238                 } else {
3239                         err = ext4_ext_zeroout(inode, &orig_ex);
3240                         zero_ex.ee_block = orig_ex.ee_block;
3241                         zero_ex.ee_len = cpu_to_le16(
3242                                                 ext4_ext_get_actual_len(&orig_ex));
3243                         ext4_ext_store_pblock(&zero_ex,
3244                                               ext4_ext_pblock(&orig_ex));
3245                 }
3246
3247                 if (err)
3248                         goto fix_extent_len;
3249                 /* update the extent length and mark as initialized */
3250                 ex->ee_len = cpu_to_le16(ee_len);
3251                 ext4_ext_try_to_merge(handle, inode, path, ex);
3252                 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3253                 if (err)
3254                         goto fix_extent_len;
3255
3256                 /* update extent status tree */
3257                 err = ext4_zeroout_es(inode, &zero_ex);
3258
3259                 goto out;
3260         } else if (err)
3261                 goto fix_extent_len;
3262
3263 out:
3264         ext4_ext_show_leaf(inode, path);
3265         return err;
3266
3267 fix_extent_len:
3268         ex->ee_len = orig_ex.ee_len;
3269         ext4_ext_dirty(handle, inode, path + path->p_depth);
3270         return err;
3271 }
3272
3273 /*
3274  * ext4_split_extent() splits an extent and marks the extent covered
3275  * by @map as @split_flag indicates.
3276  *
3277  * It may result in splitting the extent into multiple extents (up to three)
3278  * There are three possibilities:
3279  *   a> There is no split required
3280  *   b> Splits in two extents: Split is happening at either end of the extent
3281  *   c> Splits in three extents: Someone is splitting in the middle of the extent
3282  *
3283  */
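/*
 * Sketch of case c> with assumed numbers: for an extent [0, 100) and a
 * map covering [30, 60), the first ext4_split_extent_at() call below
 * splits at block 60, and the second splits the remaining head at
 * block 30, leaving [0, 30), [30, 60) and [60, 100) as three extents.
 */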
3284 static int ext4_split_extent(handle_t *handle,
3285                               struct inode *inode,
3286                               struct ext4_ext_path **ppath,
3287                               struct ext4_map_blocks *map,
3288                               int split_flag,
3289                               int flags)
3290 {
3291         struct ext4_ext_path *path = *ppath;
3292         ext4_lblk_t ee_block;
3293         struct ext4_extent *ex;
3294         unsigned int ee_len, depth;
3295         int err = 0;
3296         int unwritten;
3297         int split_flag1, flags1;
3298         int allocated = map->m_len;
3299
3300         depth = ext_depth(inode);
3301         ex = path[depth].p_ext;
3302         ee_block = le32_to_cpu(ex->ee_block);
3303         ee_len = ext4_ext_get_actual_len(ex);
3304         unwritten = ext4_ext_is_unwritten(ex);
3305
3306         if (map->m_lblk + map->m_len < ee_block + ee_len) {
3307                 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
3308                 flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
3309                 if (unwritten)
3310                         split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
3311                                        EXT4_EXT_MARK_UNWRIT2;
3312                 if (split_flag & EXT4_EXT_DATA_VALID2)
3313                         split_flag1 |= EXT4_EXT_DATA_VALID1;
3314                 err = ext4_split_extent_at(handle, inode, ppath,
3315                                 map->m_lblk + map->m_len, split_flag1, flags1);
3316                 if (err)
3317                         goto out;
3318         } else {
3319                 allocated = ee_len - (map->m_lblk - ee_block);
3320         }
3321         /*
3322          * An updated path is required because the previous ext4_split_extent_at()
3323          * may have split the original leaf or zeroed out the extent.
3324          */
3325         path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
3326         if (IS_ERR(path))
3327                 return PTR_ERR(path);
3328         depth = ext_depth(inode);
3329         ex = path[depth].p_ext;
3330         if (!ex) {
3331                 EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
3332                                  (unsigned long) map->m_lblk);
3333                 return -EIO;
3334         }
3335         unwritten = ext4_ext_is_unwritten(ex);
3336         split_flag1 = 0;
3337
3338         if (map->m_lblk >= ee_block) {
3339                 split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
3340                 if (unwritten) {
3341                         split_flag1 |= EXT4_EXT_MARK_UNWRIT1;
3342                         split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
3343                                                      EXT4_EXT_MARK_UNWRIT2);
3344                 }
3345                 err = ext4_split_extent_at(handle, inode, ppath,
3346                                 map->m_lblk, split_flag1, flags);
3347                 if (err)
3348                         goto out;
3349         }
3350
3351         ext4_ext_show_leaf(inode, path);
3352 out:
3353         return err ? err : allocated;
3354 }
3355
3356 /*
3357  * This function is called by ext4_ext_map_blocks() if someone tries to write
3358  * to an unwritten extent. It may result in splitting the unwritten
3359  * extent into multiple extents (up to three - one initialized and two
3360  * unwritten).
3361  * There are three possibilities:
3362  *   a> There is no split required: Entire extent should be initialized
3363  *   b> Splits in two extents: Write is happening at either end of the extent
3364  *   c> Splits in three extents: Someone is writing in the middle of the extent
3365  *
3366  * Pre-conditions:
3367  *  - The extent pointed to by 'path' is unwritten.
3368  *  - The extent pointed to by 'path' contains a superset
3369  *    of the logical span [map->m_lblk, map->m_lblk + map->m_len).
3370  *
3371  * Post-conditions on success:
3372  *  - the returned value is the number of blocks beyond map->m_lblk
3373  *    that are allocated and initialized.
3374  *    It is guaranteed to be >= map->m_len.
3375  */
3376 static int ext4_ext_convert_to_initialized(handle_t *handle,
3377                                            struct inode *inode,
3378                                            struct ext4_map_blocks *map,
3379                                            struct ext4_ext_path **ppath,
3380                                            int flags)
3381 {
3382         struct ext4_ext_path *path = *ppath;
3383         struct ext4_sb_info *sbi;
3384         struct ext4_extent_header *eh;
3385         struct ext4_map_blocks split_map;
3386         struct ext4_extent zero_ex;
3387         struct ext4_extent *ex, *abut_ex;
3388         ext4_lblk_t ee_block, eof_block;
3389         unsigned int ee_len, depth, map_len = map->m_len;
3390         int allocated = 0, max_zeroout = 0;
3391         int err = 0;
3392         int split_flag = 0;
3393
3394         ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical "
3395                 "block %llu, max_blocks %u\n", inode->i_ino,
3396                 (unsigned long long)map->m_lblk, map_len);
3397
3398         sbi = EXT4_SB(inode->i_sb);
3399         eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3400                 inode->i_sb->s_blocksize_bits;
3401         if (eof_block < map->m_lblk + map_len)
3402                 eof_block = map->m_lblk + map_len;
3403
3404         depth = ext_depth(inode);
3405         eh = path[depth].p_hdr;
3406         ex = path[depth].p_ext;
3407         ee_block = le32_to_cpu(ex->ee_block);
3408         ee_len = ext4_ext_get_actual_len(ex);
3409         zero_ex.ee_len = 0;
3410
3411         trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
3412
3413         /* Pre-conditions */
3414         BUG_ON(!ext4_ext_is_unwritten(ex));
3415         BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
3416
3417         /*
3418          * Attempt to transfer newly initialized blocks from the currently
3419          * unwritten extent to its neighbor. This is much cheaper
3420          * than an insertion followed by a merge as those involve costly
3421          * memmove() calls. Transferring to the left is the common case in
3422          * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE)
3423          * followed by append writes.
3424          *
3425          * Limitations of the current logic:
3426          *  - L1: we do not deal with writes covering the whole extent.
3427          *    This would require removing the extent if the transfer
3428          *    is possible.
3429          *  - L2: we only attempt to merge with an extent stored in the
3430          *    same extent tree node.
3431          */
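        /*
         * Hypothetical left-merge illustration: with an initialized
         * neighbor [0, 100) physically contiguous with an unwritten
         * extent [100, 200), a write mapping [100, 104) shrinks the
         * unwritten extent to [104, 200) and extends the neighbor to
         * [0, 104), with no insert or memmove required.
         */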
3432         if ((map->m_lblk == ee_block) &&
3433                 /* See if we can merge left */
3434                 (map_len < ee_len) &&           /*L1*/
3435                 (ex > EXT_FIRST_EXTENT(eh))) {  /*L2*/
3436                 ext4_lblk_t prev_lblk;
3437                 ext4_fsblk_t prev_pblk, ee_pblk;
3438                 unsigned int prev_len;
3439
3440                 abut_ex = ex - 1;
3441                 prev_lblk = le32_to_cpu(abut_ex->ee_block);
3442                 prev_len = ext4_ext_get_actual_len(abut_ex);
3443                 prev_pblk = ext4_ext_pblock(abut_ex);
3444                 ee_pblk = ext4_ext_pblock(ex);
3445
3446                 /*
3447                  * A transfer of blocks from 'ex' to 'abut_ex' is allowed
3448                  * upon those conditions:
3449                  * - C1: abut_ex is initialized,
3450                  * - C2: abut_ex is logically abutting ex,
3451                  * - C3: abut_ex is physically abutting ex,
3452                  * - C4: abut_ex can receive the additional blocks without
3453                  *   overflowing the (initialized) length limit.
3454                  */
3455                 if ((!ext4_ext_is_unwritten(abut_ex)) &&                /*C1*/
3456                         ((prev_lblk + prev_len) == ee_block) &&         /*C2*/
3457                         ((prev_pblk + prev_len) == ee_pblk) &&          /*C3*/
3458                         (prev_len < (EXT_INIT_MAX_LEN - map_len))) {    /*C4*/
3459                         err = ext4_ext_get_access(handle, inode, path + depth);
3460                         if (err)
3461                                 goto out;
3462
3463                         trace_ext4_ext_convert_to_initialized_fastpath(inode,
3464                                 map, ex, abut_ex);
3465
3466                         /* Shift the start of ex by 'map_len' blocks */
3467                         ex->ee_block = cpu_to_le32(ee_block + map_len);
3468                         ext4_ext_store_pblock(ex, ee_pblk + map_len);
3469                         ex->ee_len = cpu_to_le16(ee_len - map_len);
3470                         ext4_ext_mark_unwritten(ex); /* Restore the flag */
3471
3472                         /* Extend abut_ex by 'map_len' blocks */
3473                         abut_ex->ee_len = cpu_to_le16(prev_len + map_len);
3474
3475                         /* Result: number of initialized blocks past m_lblk */
3476                         allocated = map_len;
3477                 }
3478         } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
3479                    (map_len < ee_len) &&        /*L1*/
3480                    ex < EXT_LAST_EXTENT(eh)) {  /*L2*/
3481                 /* See if we can merge right */
3482                 ext4_lblk_t next_lblk;
3483                 ext4_fsblk_t next_pblk, ee_pblk;
3484                 unsigned int next_len;
3485
3486                 abut_ex = ex + 1;
3487                 next_lblk = le32_to_cpu(abut_ex->ee_block);
3488                 next_len = ext4_ext_get_actual_len(abut_ex);
3489                 next_pblk = ext4_ext_pblock(abut_ex);
3490                 ee_pblk = ext4_ext_pblock(ex);
3491
3492                 /*
3493                  * A transfer of blocks from 'ex' to 'abut_ex' is allowed
3494                  * upon those conditions:
3495                  * - C1: abut_ex is initialized,
3496                  * - C2: abut_ex is logically abutting ex,
3497                  * - C3: abut_ex is physically abutting ex,
3498                  * - C4: abut_ex can receive the additional blocks without
3499                  *   overflowing the (initialized) length limit.
3500                  */
3501                 if ((!ext4_ext_is_unwritten(abut_ex)) &&                /*C1*/
3502                     ((map->m_lblk + map_len) == next_lblk) &&           /*C2*/
3503                     ((ee_pblk + ee_len) == next_pblk) &&                /*C3*/
3504                     (next_len < (EXT_INIT_MAX_LEN - map_len))) {        /*C4*/
3505                         err = ext4_ext_get_access(handle, inode, path + depth);
3506                         if (err)
3507                                 goto out;
3508
3509                         trace_ext4_ext_convert_to_initialized_fastpath(inode,
3510                                 map, ex, abut_ex);
3511
3512                         /* Shift the start of abut_ex by 'map_len' blocks */
3513                         abut_ex->ee_block = cpu_to_le32(next_lblk - map_len);
3514                         ext4_ext_store_pblock(abut_ex, next_pblk - map_len);
3515                         ex->ee_len = cpu_to_le16(ee_len - map_len);
3516                         ext4_ext_mark_unwritten(ex); /* Restore the flag */
3517
3518                         /* Extend abut_ex by 'map_len' blocks */
3519                         abut_ex->ee_len = cpu_to_le16(next_len + map_len);
3520
3521                         /* Result: number of initialized blocks past m_lblk */
3522                         allocated = map_len;
3523                 }
3524         }
3525         if (allocated) {
3526                 /* Mark the block containing both extents as dirty */
3527                 ext4_ext_dirty(handle, inode, path + depth);
3528
3529                 /* Update path to point to the right extent */
3530                 path[depth].p_ext = abut_ex;
3531                 goto out;
3532         } else
3533                 allocated = ee_len - (map->m_lblk - ee_block);
3534
3535         WARN_ON(map->m_lblk < ee_block);
3536         /*
3537          * It is safe to convert extent to initialized via explicit
3538          * zeroout only if extent is fully inside i_size or new_size.
3539          */
3540         split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3541
3542         if (EXT4_EXT_MAY_ZEROOUT & split_flag)
3543                 max_zeroout = sbi->s_extent_max_zeroout_kb >>
3544                         (inode->i_sb->s_blocksize_bits - 10);
3545
3546         /* If extent is no larger than s_extent_max_zeroout_kb, zero it out directly */
3547         if (max_zeroout && (ee_len <= max_zeroout)) {
3548                 err = ext4_ext_zeroout(inode, ex);
3549                 if (err)
3550                         goto out;
3551                 zero_ex.ee_block = ex->ee_block;
3552                 zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex));
3553                 ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));
3554
3555                 err = ext4_ext_get_access(handle, inode, path + depth);
3556                 if (err)
3557                         goto out;
3558                 ext4_ext_mark_initialized(ex);
3559                 ext4_ext_try_to_merge(handle, inode, path, ex);
3560                 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3561                 goto out;
3562         }
3563
3564         /*
3565          * four cases:
3566          * 1. split the extent into three extents.
3567          * 2. split the extent into two extents, zeroout the first half.
3568          * 3. split the extent into two extents, zeroout the second half.
3569          * 4. split the extent into two extents without zeroout.
3570          */
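        /*
         * Rough mapping from the checks below to the cases above (my
         * reading, not stated in the code): case 3 applies when the
         * whole tail from m_lblk onward fits under max_zeroout, case 2
         * when only the head up to m_lblk + m_len does; otherwise
         * ext4_split_extent() performs the plain two- or three-way
         * split of cases 1 and 4.
         */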
3571         split_map.m_lblk = map->m_lblk;
3572         split_map.m_len = map->m_len;
3573
3574         if (max_zeroout && (allocated > map->m_len)) {
3575                 if (allocated <= max_zeroout) {
3576                         /* case 3 */
3577                         zero_ex.ee_block =
3578                                          cpu_to_le32(map->m_lblk);
3579                         zero_ex.ee_len = cpu_to_le16(allocated);
3580                         ext4_ext_store_pblock(&zero_ex,
3581                                 ext4_ext_pblock(ex) + map->m_lblk - ee_block);
3582                         err = ext4_ext_zeroout(inode, &zero_ex);
3583                         if (err)
3584                                 goto out;
3585                         split_map.m_lblk = map->m_lblk;
3586                         split_map.m_len = allocated;
3587                 } else if (map->m_lblk - ee_block + map->m_len < max_zeroout) {
3588                         /* case 2 */
3589                         if (map->m_lblk != ee_block) {
3590                                 zero_ex.ee_block = ex->ee_block;
3591                                 zero_ex.ee_len = cpu_to_le16(map->m_lblk -
3592                                                         ee_block);
3593                                 ext4_ext_store_pblock(&zero_ex,
3594                                                       ext4_ext_pblock(ex));
3595                                 err = ext4_ext_zeroout(inode, &zero_ex);
3596                                 if (err)
3597                                         goto out;
3598                         }
3599
3600                         split_map.m_lblk = ee_block;
3601                         split_map.m_len = map->m_lblk - ee_block + map->m_len;
3602                         allocated = map->m_len;
3603                 }
3604         }
3605
3606         allocated = ext4_split_extent(handle, inode, ppath,
3607                                       &split_map, split_flag, flags);
3608         if (allocated < 0)
3609                 err = allocated;
3610
3611 out:
3612         /* If we got a failure, don't update the extent status tree */
3613         if (!err)
3614                 err = ext4_zeroout_es(inode, &zero_ex);
3615         return err ? err : allocated;
3616 }
3617
3618 /*
3619  * This function is called by ext4_ext_map_blocks() from
3620  * ext4_get_blocks_dio_write() when DIO is used to write
3621  * to an unwritten extent.
3622  *
3623  * Writing to an unwritten extent may result in splitting the unwritten
3624  * extent into multiple initialized/unwritten extents (up to three)
3625  * There are three possibilities:
3626  *   a> There is no split required: Entire extent should be unwritten
3627  *   b> Splits in two extents: Write is happening at either end of the extent
3628  *   c> Splits in three extents: Someone is writing in the middle of the extent
3629  *
3630  * This works the same way in the case of initialized -> unwritten conversion.
3631  *
3632  * One or more index blocks may be needed if the extent tree grows after
3633  * the unwritten extent is split. To prevent ENOSPC at IO
3634  * completion, we need to split the unwritten extent before the DIO
3635  * submits the IO. The unwritten extent will be split
3636  * into (at most) three unwritten extents. After IO completes, the part
3637  * being filled will be converted to initialized by the end_io callback
3638  * via ext4_convert_unwritten_extents().
3639  *
3640  * Returns the size of unwritten extent to be written on success.
3641  */
3642 static int ext4_split_convert_extents(handle_t *handle,
3643                                         struct inode *inode,
3644                                         struct ext4_map_blocks *map,
3645                                         struct ext4_ext_path **ppath,
3646                                         int flags)
3647 {
3648         struct ext4_ext_path *path = *ppath;
3649         ext4_lblk_t eof_block;
3650         ext4_lblk_t ee_block;
3651         struct ext4_extent *ex;
3652         unsigned int ee_len;
3653         int split_flag = 0, depth;
3654
3655         ext_debug("%s: inode %lu, logical block %llu, max_blocks %u\n",
3656                   __func__, inode->i_ino,
3657                   (unsigned long long)map->m_lblk, map->m_len);
3658
3659         eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3660                 inode->i_sb->s_blocksize_bits;
3661         if (eof_block < map->m_lblk + map->m_len)
3662                 eof_block = map->m_lblk + map->m_len;
3663         /*
3664          * It is safe to convert extent to initialized via explicit
3665          * zeroout only if the extent is fully inside i_size or new_size.
3666          */
3667         depth = ext_depth(inode);
3668         ex = path[depth].p_ext;
3669         ee_block = le32_to_cpu(ex->ee_block);
3670         ee_len = ext4_ext_get_actual_len(ex);
3671
3672         /* Convert to unwritten */
3673         if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
3674                 split_flag |= EXT4_EXT_DATA_VALID1;
3675         /* Convert to initialized */
3676         } else if (flags & EXT4_GET_BLOCKS_CONVERT) {
3677                 split_flag |= ee_block + ee_len <= eof_block ?
3678                               EXT4_EXT_MAY_ZEROOUT : 0;
3679                 split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
3680         }
3681         flags |= EXT4_GET_BLOCKS_PRE_IO;
3682         return ext4_split_extent(handle, inode, ppath, map, split_flag, flags);
3683 }
3684
3685 static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3686                                                 struct inode *inode,
3687                                                 struct ext4_map_blocks *map,
3688                                                 struct ext4_ext_path **ppath)
3689 {
3690         struct ext4_ext_path *path = *ppath;
3691         struct ext4_extent *ex;
3692         ext4_lblk_t ee_block;
3693         unsigned int ee_len;
3694         int depth;
3695         int err = 0;
3696
3697         depth = ext_depth(inode);
3698         ex = path[depth].p_ext;
3699         ee_block = le32_to_cpu(ex->ee_block);
3700         ee_len = ext4_ext_get_actual_len(ex);
3701
3702         ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical "
3703                 "block %llu, max_blocks %u\n", inode->i_ino,
3704                   (unsigned long long)ee_block, ee_len);
3705
3706         /* If the extent is larger than requested, it is a clear sign that we
3707          * still have some extent state machine issues left, so a split is
3708          * still required.
3709          * TODO: once all related issues are fixed, this situation should be
3710          * illegal.
3711          */
3712         if (ee_block != map->m_lblk || ee_len > map->m_len) {
3713 #ifdef EXT4_DEBUG
3714                 ext4_warning("Inode (%ld) finished: extent logical block %llu,"
3715                              " len %u; IO logical block %llu, len %u\n",
3716                              inode->i_ino, (unsigned long long)ee_block, ee_len,
3717                              (unsigned long long)map->m_lblk, map->m_len);
3718 #endif
3719                 err = ext4_split_convert_extents(handle, inode, map, ppath,
3720                                                  EXT4_GET_BLOCKS_CONVERT);
3721                 if (err < 0)
3722                         return err;
3723                 path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
3724                 if (IS_ERR(path))
3725                         return PTR_ERR(path);
3726                 depth = ext_depth(inode);
3727                 ex = path[depth].p_ext;
3728         }
3729
3730         err = ext4_ext_get_access(handle, inode, path + depth);
3731         if (err)
3732                 goto out;
3733         /* first mark the extent as initialized */
3734         ext4_ext_mark_initialized(ex);
3735
3736         /* note: ext4_ext_correct_indexes() isn't needed here because
3737          * borders are not changed
3738          */
3739         ext4_ext_try_to_merge(handle, inode, path, ex);
3740
3741         /* Mark modified extent as dirty */
3742         err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3743 out:
3744         ext4_ext_show_leaf(inode, path);
3745         return err;
3746 }
3747
3748 static void unmap_underlying_metadata_blocks(struct block_device *bdev,
3749                         sector_t block, int count)
3750 {
3751         int i;
3752         for (i = 0; i < count; i++)
3753                 unmap_underlying_metadata(bdev, block + i);
3754 }
3755
3756 /*
3757  * Handle EOFBLOCKS_FL flag, clearing it if necessary
3758  */
3759 static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
3760                               ext4_lblk_t lblk,
3761                               struct ext4_ext_path *path,
3762                               unsigned int len)
3763 {
3764         int i, depth;
3765         struct ext4_extent_header *eh;
3766         struct ext4_extent *last_ex;
3767
3768         if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
3769                 return 0;
3770
3771         depth = ext_depth(inode);
3772         eh = path[depth].p_hdr;
3773
3774         /*
3775          * We're going to remove EOFBLOCKS_FL entirely in future so we
3776          * do not care for this case anymore. Simply remove the flag
3777          * if there are no extents.
3778          */
3779         if (unlikely(!eh->eh_entries))
3780                 goto out;
3781         last_ex = EXT_LAST_EXTENT(eh);
3782         /*
3783          * We should clear the EOFBLOCKS_FL flag if we are writing the
3784          * last block in the last extent in the file.  We test this by
3785          * first checking to see if the caller to
3786          * ext4_ext_get_blocks() was interested in the last block (or
3787          * a block beyond the last block) in the current extent.  If
3788          * this turns out to be false, we can bail out from this
3789          * function immediately.
3790          */
3791         if (lblk + len < le32_to_cpu(last_ex->ee_block) +
3792             ext4_ext_get_actual_len(last_ex))
3793                 return 0;
3794         /*
3795          * If the caller does appear to be planning to write at or
3796          * beyond the end of the current extent, we then test to see
3797          * if the current extent is the last extent in the file, by
3798          * checking to make sure it was reached via the rightmost node
3799          * at each level of the tree.
3800          */
3801         for (i = depth-1; i >= 0; i--)
3802                 if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
3803                         return 0;
3804 out:
3805         ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3806         return ext4_mark_inode_dirty(handle, inode);
3807 }
3808
3809 /**
3810  * ext4_find_delalloc_range: find delayed allocated block in the given range.
3811  *
3812  * Return 1 if there is a delalloc block in the range, otherwise 0.
3813  */
3814 int ext4_find_delalloc_range(struct inode *inode,
3815                              ext4_lblk_t lblk_start,
3816                              ext4_lblk_t lblk_end)
3817 {
3818         struct extent_status es;
3819
3820         ext4_es_find_delayed_extent_range(inode, lblk_start, lblk_end, &es);
3821         if (es.es_len == 0)
3822                 return 0; /* there is no delay extent in this tree */
3823         else if (es.es_lblk <= lblk_start &&
3824                  lblk_start < es.es_lblk + es.es_len)
3825                 return 1;
3826         else if (lblk_start <= es.es_lblk && es.es_lblk <= lblk_end)
3827                 return 1;
3828         else
3829                 return 0;
3830 }
3831
3832 int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
3833 {
3834         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3835         ext4_lblk_t lblk_start, lblk_end;
3836         lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
3837         lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
3838
3839         return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
3840 }
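/*
 * For illustration (ratio assumed): with a cluster ratio of 16, a
 * lookup for lblk 37 is widened to the cluster-aligned range [32, 47]
 * before ext4_find_delalloc_range() is consulted.
 */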
3841
3842 /**
3843  * Determines how many complete clusters (out of those specified by the 'map')
3844  * are under delalloc and had quota reserved for them.
3845  * This function is called when we are writing out the blocks that were
3846  * originally written with their allocation delayed, but then the space was
3847  * allocated using fallocate() before the delayed allocation could be resolved.
3848  * The cases to look for are:
3849  * ('=' indicates delayed allocated blocks
3850  *  '-' indicates non-delayed allocated blocks)
3851  * (a) partial clusters towards beginning and/or end outside of allocated range
3852  *     are not delalloc'ed.
3853  *      Ex:
3854  *      |----c---=|====c====|====c====|===-c----|
3855  *               |++++++ allocated ++++++|
3856  *      ==> 4 complete clusters in the above example
3857  *
3858  * (b) partial cluster (outside of allocated range) towards either end is
3859  *     marked for delayed allocation. In this case, we will exclude that
3860  *     cluster.
3861  *      Ex:
3862  *      |----====c========|========c========|
3863  *           |++++++ allocated ++++++|
3864  *      ==> 1 complete cluster in the above example
3865  *
3866  *      Ex:
3867  *      |================c================|
3868  *            |++++++ allocated ++++++|
3869  *      ==> 0 complete clusters in the above example
3870  *
3871  * ext4_da_update_reserve_space() will be called only if we
3872  * determine here that there were some "entire" clusters that span
3873  * this 'allocated' range.
3874  * In the non-bigalloc case, this function will just end up returning num_blks
3875  * without ever calling ext4_find_delalloc_range.
3876  */
3877 static unsigned int
3878 get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
3879                            unsigned int num_blks)
3880 {
3881         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3882         ext4_lblk_t alloc_cluster_start, alloc_cluster_end;
3883         ext4_lblk_t lblk_from, lblk_to, c_offset;
3884         unsigned int allocated_clusters = 0;
3885
3886         alloc_cluster_start = EXT4_B2C(sbi, lblk_start);
3887         alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1);
3888
3889         /* max possible clusters for this allocation */
3890         allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;
3891
3892         trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
3893
3894         /* Check towards left side */
3895         c_offset = EXT4_LBLK_COFF(sbi, lblk_start);
3896         if (c_offset) {
3897                 lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start);
3898                 lblk_to = lblk_from + c_offset - 1;
3899
3900                 if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
3901                         allocated_clusters--;
3902         }
3903
3904         /* Now check towards right. */
3905         c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks);
3906         if (allocated_clusters && c_offset) {
3907                 lblk_from = lblk_start + num_blks;
3908                 lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
3909
3910                 if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
3911                         allocated_clusters--;
3912         }
3913
3914         return allocated_clusters;
3915 }
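/*
 * Worked example under assumed parameters: with a cluster ratio of 4,
 * lblk_start = 2 and num_blks = 12 span clusters 0 through 3, so
 * allocated_clusters starts at 4; the partial head (blocks 0-1) and
 * partial tail (blocks 14-15) are then each checked for delalloc and
 * subtracted if found.
 */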
3916
3917 static int
3918 convert_initialized_extent(handle_t *handle, struct inode *inode,
3919                            struct ext4_map_blocks *map,
3920                            struct ext4_ext_path **ppath, int flags,
3921                            unsigned int allocated, ext4_fsblk_t newblock)
3922 {
3923         struct ext4_ext_path *path = *ppath;
3924         struct ext4_extent *ex;
3925         ext4_lblk_t ee_block;
3926         unsigned int ee_len;
3927         int depth;
3928         int err = 0;
3929
3930         /*
3931          * Make sure that the extent is no bigger than we support with
3932          * an unwritten extent
3933          */
3934         if (map->m_len > EXT_UNWRITTEN_MAX_LEN)
3935                 map->m_len = EXT_UNWRITTEN_MAX_LEN / 2;
3936
3937         depth = ext_depth(inode);
3938         ex = path[depth].p_ext;
3939         ee_block = le32_to_cpu(ex->ee_block);
3940         ee_len = ext4_ext_get_actual_len(ex);
3941
3942         ext_debug("%s: inode %lu, logical "
3943                 "block %llu, max_blocks %u\n", __func__, inode->i_ino,
3944                   (unsigned long long)ee_block, ee_len);
3945
3946         if (ee_block != map->m_lblk || ee_len > map->m_len) {
3947                 err = ext4_split_convert_extents(handle, inode, map, ppath,
3948                                 EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
3949                 if (err < 0)
3950                         return err;
3951                 path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
3952                 if (IS_ERR(path))
3953                         return PTR_ERR(path);
3954                 depth = ext_depth(inode);
3955                 ex = path[depth].p_ext;
3956                 if (!ex) {
3957                         EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
3958                                          (unsigned long) map->m_lblk);
3959                         return -EIO;
3960                 }
3961         }
3962
3963         err = ext4_ext_get_access(handle, inode, path + depth);
3964         if (err)
3965                 return err;
3966         /* first mark the extent as unwritten */
3967         ext4_ext_mark_unwritten(ex);
3968
3969         /* note: ext4_ext_correct_indexes() isn't needed here because
3970          * borders are not changed
3971          */
3972         ext4_ext_try_to_merge(handle, inode, path, ex);
3973
3974         /* Mark modified extent as dirty */
3975         err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3976         if (err)
3977                 return err;
3978         ext4_ext_show_leaf(inode, path);
3979
3980         ext4_update_inode_fsync_trans(handle, inode, 1);
3981         err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len);
3982         if (err)
3983                 return err;
3984         map->m_flags |= EXT4_MAP_UNWRITTEN;
3985         if (allocated > map->m_len)
3986                 allocated = map->m_len;
3987         map->m_len = allocated;
3988         return allocated;
3989 }
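
/*
 * Editorial illustration (hypothetical numbers): if the found extent covers
 * logical blocks [100, 107] (ee_block = 100, ee_len = 8) and the caller asks
 * to convert only [100, 103] (m_lblk = 100, m_len = 4), then ee_len > m_len,
 * so ext4_split_convert_extents() first splits the extent at block 104; the
 * path is then re-looked-up so that ex points at the [100, 103] piece, which
 * is marked unwritten and merged with any unwritten neighbours.
 */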
3990
3991 static int
3992 ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
3993                         struct ext4_map_blocks *map,
3994                         struct ext4_ext_path **ppath, int flags,
3995                         unsigned int allocated, ext4_fsblk_t newblock)
3996 {
3997         struct ext4_ext_path *path = *ppath;
3998         int ret = 0;
3999         int err = 0;
4000         ext4_io_end_t *io = ext4_inode_aio(inode);
4001
4002         ext_debug("ext4_ext_handle_unwritten_extents: inode %lu, logical "
4003                   "block %llu, max_blocks %u, flags %x, allocated %u\n",
4004                   inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
4005                   flags, allocated);
4006         ext4_ext_show_leaf(inode, path);
4007
4008         /*
4009          * When writing into unwritten space, we should not fail to
4010          * allocate metadata blocks for the new extent block if needed.
4011          */
4012         flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
4013
4014         trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
4015                                                     allocated, newblock);
4016
4017         /* get_block() before submitting the IO, split the extent */
4018         if (flags & EXT4_GET_BLOCKS_PRE_IO) {
4019                 ret = ext4_split_convert_extents(handle, inode, map, ppath,
4020                                          flags | EXT4_GET_BLOCKS_CONVERT);
4021                 if (ret <= 0)
4022                         goto out;
4023                 /*
4024                  * Flag the inode (non-AIO case) or the end_io struct (AIO
4025                  * case) so that this IO gets converted to written when it
4026                  * is completed.
4027                  */
4028                 if (io)
4029                         ext4_set_io_unwritten_flag(inode, io);
4030                 else
4031                         ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
4032                 map->m_flags |= EXT4_MAP_UNWRITTEN;
4033                 goto out;
4034         }
4035         /* IO end_io complete, convert the filled extent to written */
4036         if (flags & EXT4_GET_BLOCKS_CONVERT) {
4037                 ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
4038                                                            ppath);
4039                 if (ret >= 0) {
4040                         ext4_update_inode_fsync_trans(handle, inode, 1);
4041                         err = check_eofblocks_fl(handle, inode, map->m_lblk,
4042                                                  path, map->m_len);
4043                 } else
4044                         err = ret;
4045                 map->m_flags |= EXT4_MAP_MAPPED;
4046                 map->m_pblk = newblock;
4047                 if (allocated > map->m_len)
4048                         allocated = map->m_len;
4049                 map->m_len = allocated;
4050                 goto out2;
4051         }
4052         /* buffered IO case */
4053         /*
4054          * This is a repeated fallocate creation request:
4055          * we already have an unwritten extent.
4056          */
4057         if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
4058                 map->m_flags |= EXT4_MAP_UNWRITTEN;
4059                 goto map_out;
4060         }
4061
4062         /* buffered READ or buffered write_begin() lookup */
4063         if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
4064                 /*
4065                  * We have blocks reserved already.  We
4066                  * return allocated blocks so that delalloc
4067                  * won't do block reservation for us.  But
4068                  * the buffer head will be unmapped so that
4069                  * a read from the block returns 0s.
4070                  */
4071                 map->m_flags |= EXT4_MAP_UNWRITTEN;
4072                 goto out1;
4073         }
4074
4075         /* buffered write, writepage time, convert */
4076         ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags);
4077         if (ret >= 0)
4078                 ext4_update_inode_fsync_trans(handle, inode, 1);
4079 out:
4080         if (ret <= 0) {
4081                 err = ret;
4082                 goto out2;
4083         } else
4084                 allocated = ret;
4085         map->m_flags |= EXT4_MAP_NEW;
4086         /*
4087          * If we allocated more blocks than requested, we need to
4088          * make sure we unmap the extra blocks allocated. The
4089          * blocks actually needed will get unmapped later, when
4090          * we find the buffer_head marked new; here we only trim
4091          * the mapping back down to the requested length.
4092          */
4093         if (allocated > map->m_len) {
4094                 unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
4095                                         newblock + map->m_len,
4096                                         allocated - map->m_len);
4097                 allocated = map->m_len;
4098         }
4099         map->m_len = allocated;
4100
4101         /*
4102          * If we have done fallocate at an offset that is already
4103          * delayed allocated, we would have the block reservation
4104          * and quota reservation done in the delayed write path.
4105          * But fallocate would have already updated the quota and block
4106          * count for this offset. So cancel these reservations.
4107          */
4108         if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
4109                 unsigned int reserved_clusters;
4110                 reserved_clusters = get_reserved_cluster_alloc(inode,
4111                                 map->m_lblk, map->m_len);
4112                 if (reserved_clusters)
4113                         ext4_da_update_reserve_space(inode,
4114                                                      reserved_clusters,
4115                                                      0);
4116         }
4117
4118 map_out:
4119         map->m_flags |= EXT4_MAP_MAPPED;
4120         if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
4121                 err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
4122                                          map->m_len);
4123                 if (err < 0)
4124                         goto out2;
4125         }
4126 out1:
4127         if (allocated > map->m_len)
4128                 allocated = map->m_len;
4129         ext4_ext_show_leaf(inode, path);
4130         map->m_pblk = newblock;
4131         map->m_len = allocated;
4132 out2:
4133         return err ? err : allocated;
4134 }
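
/*
 * Editorial summary of the dispatch above (not part of the original source):
 * PRE_IO splits the extent before the IO is submitted and flags the inode or
 * io_end for later conversion; CONVERT runs at end_io time and converts the
 * now-filled extent to written; UNWRIT_EXT is a repeated fallocate request
 * and reports the existing unwritten extent; a lookup without CREATE returns
 * the reservation with the buffer left unmapped; anything else is the
 * buffered writeback path, which converts the extent to initialized.
 */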
4135
4136 /*
4137  * get_implied_cluster_alloc - check to see if the requested
4138  * allocation (in the map structure) overlaps with a cluster already
4139  * allocated in an extent.
4140  *      @sb     The filesystem superblock structure
4141  *      @map    The requested lblk->pblk mapping
4142  *      @ex     The extent structure which might contain an implied
4143  *                      cluster allocation
4144  *
4145  * This function is called by ext4_ext_map_blocks() after we failed to
4146  * find blocks that were already in the inode's extent tree.  Hence,
4147  * we know that the beginning of the requested region cannot overlap
4148  * the extent from the inode's extent tree.  There are three cases we
4149  * want to catch.  The first is this case:
4150  *
4151  *               |--- cluster # N--|
4152  *    |--- extent ---|  |---- requested region ---|
4153  *                      |==========|
4154  *
4155  * The second case that we need to test for is this one:
4156  *
4157  *   |--------- cluster # N ----------------|
4158  *         |--- requested region --|   |------- extent ----|
4159  *         |=======================|
4160  *
4161  * The third case is when the requested region lies between two extents
4162  * within the same cluster:
4163  *          |------------- cluster # N-------------|
4164  * |----- ex -----|                  |---- ex_right ----|
4165  *                  |------ requested region ------|
4166  *                  |================|
4167  *
4168  * In each of the above cases, we need to set the map->m_pblk and
4169  * map->m_len so they correspond to the extent labelled as
4170  * "|====|" from cluster #N, since it is already in use for data in
4171  * cluster EXT4_B2C(sbi, map->m_lblk).  We will then return 1 to
4172  * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
4173  * as a new "allocated" block region.  Otherwise, we will return 0 and
4174  * ext4_ext_map_blocks() will then allocate one or more new clusters
4175  * by calling ext4_mb_new_blocks().
4176  */
4177 static int get_implied_cluster_alloc(struct super_block *sb,
4178                                      struct ext4_map_blocks *map,
4179                                      struct ext4_extent *ex,
4180                                      struct ext4_ext_path *path)
4181 {
4182         struct ext4_sb_info *sbi = EXT4_SB(sb);
4183         ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4184         ext4_lblk_t ex_cluster_start, ex_cluster_end;
4185         ext4_lblk_t rr_cluster_start;
4186         ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
4187         ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
4188         unsigned short ee_len = ext4_ext_get_actual_len(ex);
4189
4190         /* The extent passed in that we are trying to match */
4191         ex_cluster_start = EXT4_B2C(sbi, ee_block);
4192         ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
4193
4194         /* The requested region passed into ext4_map_blocks() */
4195         rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
4196
4197         if ((rr_cluster_start == ex_cluster_end) ||
4198             (rr_cluster_start == ex_cluster_start)) {
4199                 if (rr_cluster_start == ex_cluster_end)
4200                         ee_start += ee_len - 1;
4201                 map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
4202                 map->m_len = min(map->m_len,
4203                                  (unsigned) sbi->s_cluster_ratio - c_offset);
4204                 /*
4205                  * Check for and handle this case:
4206                  *
4207                  *   |--------- cluster # N-------------|
4208                  *                     |------- extent ----|
4209                  *         |--- requested region ---|
4210                  *         |===========|
4211                  */
4212
4213                 if (map->m_lblk < ee_block)
4214                         map->m_len = min(map->m_len, ee_block - map->m_lblk);
4215
4216                 /*
4217                  * Check for the case where there is already another allocated
4218                  * block to the right of 'ex' but before the end of the cluster.
4219                  *
4220                  *          |------------- cluster # N-------------|
4221                  * |----- ex -----|                  |---- ex_right ----|
4222                  *                  |------ requested region ------|
4223                  *                  |================|
4224                  */
4225                 if (map->m_lblk > ee_block) {
4226                         ext4_lblk_t next = ext4_ext_next_allocated_block(path);
4227                         map->m_len = min(map->m_len, next - map->m_lblk);
4228                 }
4229
4230                 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
4231                 return 1;
4232         }
4233
4234         trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
4235         return 0;
4236 }
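
/*
 * Editorial worked example (hypothetical numbers): with s_cluster_ratio = 4,
 * an extent ee_block = 8, ee_len = 2 mapped to pblk 1000, and a request for
 * m_lblk = 10: c_offset = 2, ex_cluster_start = ex_cluster_end = 2 and
 * rr_cluster_start = 2, so the clusters match.  ee_start is advanced to 1001
 * (the extent's last block), EXT4_PBLK_CMASK() yields the cluster base 1000,
 * and map->m_pblk = 1000 + 2 = 1002, i.e. the same offset within the
 * physical cluster.  m_len is clamped to s_cluster_ratio - c_offset = 2
 * blocks, and further by the next allocated block to the right; the
 * function then returns 1.
 */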
4237
4238
4239 /*
4240  * Block allocation/map/preallocation routine for extent-based files
4241  *
4242  *
4243  * Need to be called with
4244  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
4245  * (i.e., create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
4246  *
4247  * return > 0, number of blocks already mapped/allocated
4248  *          if create == 0 and these are pre-allocated blocks
4249  *              buffer head is unmapped
4250  *          otherwise blocks are mapped
4251  *
4252  * return = 0, if plain look up failed (blocks have not been allocated)
4253  *          buffer head is unmapped
4254  *
4255  * return < 0, error case.
4256  */
4257 int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4258                         struct ext4_map_blocks *map, int flags)
4259 {
4260         struct ext4_ext_path *path = NULL;
4261         struct ext4_extent newex, *ex, *ex2;
4262         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4263         ext4_fsblk_t newblock = 0;
4264         int free_on_err = 0, err = 0, depth, ret;
4265         unsigned int allocated = 0, offset = 0;
4266         unsigned int allocated_clusters = 0;
4267         struct ext4_allocation_request ar;
4268         ext4_io_end_t *io = ext4_inode_aio(inode);
4269         ext4_lblk_t cluster_offset;
4270         int set_unwritten = 0;
4271
4272         ext_debug("blocks %u/%u requested for inode %lu\n",
4273                   map->m_lblk, map->m_len, inode->i_ino);
4274         trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
4275
4276         /* find extent for this block */
4277         path = ext4_find_extent(inode, map->m_lblk, NULL, 0);
4278         if (IS_ERR(path)) {
4279                 err = PTR_ERR(path);
4280                 path = NULL;
4281                 goto out2;
4282         }
4283
4284         depth = ext_depth(inode);
4285
4286         /*
4287          * A consistent leaf must not be empty;
4288          * this situation is possible, though, _during_ tree modification;
4289          * this is why the assert can't be put in ext4_find_extent()
4290          */
4291         if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
4292                 EXT4_ERROR_INODE(inode, "bad extent address "
4293                                  "lblock: %lu, depth: %d pblock %lld",
4294                                  (unsigned long) map->m_lblk, depth,
4295                                  path[depth].p_block);
4296                 err = -EIO;
4297                 goto out2;
4298         }
4299
4300         ex = path[depth].p_ext;
4301         if (ex) {
4302                 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
4303                 ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
4304                 unsigned short ee_len;
4305
4306
4307                 /*
4308                  * unwritten extents are treated as holes, except that
4309                  * we split out initialized portions during a write.
4310                  */
4311                 ee_len = ext4_ext_get_actual_len(ex);
4312
4313                 trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
4314
4315                 /* if found extent covers block, simply return it */
4316                 if (in_range(map->m_lblk, ee_block, ee_len)) {
4317                         newblock = map->m_lblk - ee_block + ee_start;
4318                         /* number of remaining blocks in the extent */
4319                         allocated = ee_len - (map->m_lblk - ee_block);
4320                         ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
4321                                   ee_block, ee_len, newblock);
4322
4323                         /*
4324                          * If the extent is initialized check whether the
4325                          * caller wants to convert it to unwritten.
4326                          */
4327                         if ((!ext4_ext_is_unwritten(ex)) &&
4328                             (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
4329                                 allocated = convert_initialized_extent(
4330                                                 handle, inode, map, &path,
4331                                                 flags, allocated, newblock);
4332                                 goto out2;
4333                         } else if (!ext4_ext_is_unwritten(ex))
4334                                 goto out;
4335
4336                         ret = ext4_ext_handle_unwritten_extents(
4337                                 handle, inode, map, &path, flags,
4338                                 allocated, newblock);
4339                         if (ret < 0)
4340                                 err = ret;
4341                         else
4342                                 allocated = ret;
4343                         goto out2;
4344                 }
4345         }
4346
4347         if ((sbi->s_cluster_ratio > 1) &&
4348             ext4_find_delalloc_cluster(inode, map->m_lblk))
4349                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4350
4351         /*
4352          * requested block isn't allocated yet;
4353          * we cannot create blocks if the create flag is zero
4354          */
4355         if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
4356                 /*
4357                  * put just found gap into cache to speed up
4358                  * put the just-found gap into the cache to speed up
4359                  */
4360                 if ((flags & EXT4_GET_BLOCKS_NO_PUT_HOLE) == 0)
4361                         ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
4362                 goto out2;
4363         }
4364
4365         /*
4366          * Okay, we need to do block allocation.
4367          */
4368         map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
4369         newex.ee_block = cpu_to_le32(map->m_lblk);
4370         cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4371
4372         /*
4373          * If we are doing bigalloc, check to see if the extent returned
4374          * by ext4_find_extent() implies a cluster we can use.
4375          */
4376         if (cluster_offset && ex &&
4377             get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
4378                 ar.len = allocated = map->m_len;
4379                 newblock = map->m_pblk;
4380                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4381                 goto got_allocated_blocks;
4382         }
4383
4384         /* find neighbour allocated blocks */
4385         ar.lleft = map->m_lblk;
4386         err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
4387         if (err)
4388                 goto out2;
4389         ar.lright = map->m_lblk;
4390         ex2 = NULL;
4391         err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
4392         if (err)
4393                 goto out2;
4394
4395         /* Check if the extent after searching to the right implies a
4396          * cluster we can use. */
4397         if ((sbi->s_cluster_ratio > 1) && ex2 &&
4398             get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
4399                 ar.len = allocated = map->m_len;
4400                 newblock = map->m_pblk;
4401                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4402                 goto got_allocated_blocks;
4403         }
4404
4405         /*
4406          * See if request is beyond maximum number of blocks we can have in
4407          * a single extent. For an initialized extent this limit is
4408          * EXT_INIT_MAX_LEN and for an unwritten extent this limit is
4409          * EXT_UNWRITTEN_MAX_LEN.
4410          */
4411         if (map->m_len > EXT_INIT_MAX_LEN &&
4412             !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
4413                 map->m_len = EXT_INIT_MAX_LEN;
4414         else if (map->m_len > EXT_UNWRITTEN_MAX_LEN &&
4415                  (flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
4416                 map->m_len = EXT_UNWRITTEN_MAX_LEN;
4417
4418         /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
4419         newex.ee_len = cpu_to_le16(map->m_len);
4420         err = ext4_ext_check_overlap(sbi, inode, &newex, path);
4421         if (err)
4422                 allocated = ext4_ext_get_actual_len(&newex);
4423         else
4424                 allocated = map->m_len;
4425
4426         /* allocate new block */
4427         ar.inode = inode;
4428         ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
4429         ar.logical = map->m_lblk;
4430         /*
4431          * We calculate the offset from the beginning of the cluster
4432          * for the logical block number, since when we allocate a
4433          * physical cluster, the physical block should start at the
4434          * same offset from the beginning of the cluster.  This is
4435          * needed so that future calls to get_implied_cluster_alloc()
4436          * work correctly.
4437          */
4438         offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4439         ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
4440         ar.goal -= offset;
4441         ar.logical -= offset;
4442         if (S_ISREG(inode->i_mode))
4443                 ar.flags = EXT4_MB_HINT_DATA;
4444         else
4445                 /* disable in-core preallocation for non-regular files */
4446                 ar.flags = 0;
4447         if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
4448                 ar.flags |= EXT4_MB_HINT_NOPREALLOC;
4449         if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
4450                 ar.flags |= EXT4_MB_DELALLOC_RESERVED;
4451         newblock = ext4_mb_new_blocks(handle, &ar, &err);
4452         if (!newblock)
4453                 goto out2;
4454         ext_debug("allocate new block: goal %llu, found %llu/%u\n",
4455                   ar.goal, newblock, allocated);
4456         free_on_err = 1;
4457         allocated_clusters = ar.len;
4458         ar.len = EXT4_C2B(sbi, ar.len) - offset;
4459         if (ar.len > allocated)
4460                 ar.len = allocated;
4461
4462 got_allocated_blocks:
4463         /* try to insert new extent into found leaf and return */
4464         ext4_ext_store_pblock(&newex, newblock + offset);
4465         newex.ee_len = cpu_to_le16(ar.len);
4466         /* Mark unwritten */
4467         if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
4468                 ext4_ext_mark_unwritten(&newex);
4469                 map->m_flags |= EXT4_MAP_UNWRITTEN;
4470                 /*
4471                  * An io_end structure is created for every IO write to an
4472                  * unwritten extent. To avoid unnecessary conversion,
4473                  * here we flag only the IO that really needs the conversion.
4474                  * For the non-async direct IO case, flag the inode state
4475                  * so that we perform the conversion when the IO is done.
4476                  */
4477                 if (flags & EXT4_GET_BLOCKS_PRE_IO)
4478                         set_unwritten = 1;
4479         }
4480
4481         err = 0;
4482         if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
4483                 err = check_eofblocks_fl(handle, inode, map->m_lblk,
4484                                          path, ar.len);
4485         if (!err)
4486                 err = ext4_ext_insert_extent(handle, inode, &path,
4487                                              &newex, flags);
4488
4489         if (!err && set_unwritten) {
4490                 if (io)
4491                         ext4_set_io_unwritten_flag(inode, io);
4492                 else
4493                         ext4_set_inode_state(inode,
4494                                              EXT4_STATE_DIO_UNWRITTEN);
4495         }
4496
4497         if (err && free_on_err) {
4498                 int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
4499                         EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
4500                 /* free data blocks we just allocated */
4501                 /* not a good idea to call discard here directly,
4502          * but otherwise we'd need to call it on every free() */
4503                 ext4_discard_preallocations(inode);
4504                 ext4_free_blocks(handle, inode, NULL, newblock,
4505                                  EXT4_C2B(sbi, allocated_clusters), fb_flags);
4506                 goto out2;
4507         }
4508
4509         /* previous routine could use block we allocated */
4510         newblock = ext4_ext_pblock(&newex);
4511         allocated = ext4_ext_get_actual_len(&newex);
4512         if (allocated > map->m_len)
4513                 allocated = map->m_len;
4514         map->m_flags |= EXT4_MAP_NEW;
4515
4516         /*
4517          * Update reserved blocks/metadata blocks after successful
4518          * block allocation which had been deferred till now.
4519          */
4520         if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
4521                 unsigned int reserved_clusters;
4522                 /*
4523                  * Check how many clusters we had reserved this allocated range
4524                  * Check how many clusters we had reserved for this allocated range
4525                 reserved_clusters = get_reserved_cluster_alloc(inode,
4526                                                 map->m_lblk, allocated);
4527                 if (map->m_flags & EXT4_MAP_FROM_CLUSTER) {
4528                         if (reserved_clusters) {
4529                                 /*
4530                                  * We have clusters reserved for this range.
4531                                  * But since we are not doing actual allocation
4532                                  * and are simply using blocks from previously
4533                                  * allocated cluster, we should release the
4534                                  * reservation and not claim quota.
4535                                  */
4536                                 ext4_da_update_reserve_space(inode,
4537                                                 reserved_clusters, 0);
4538                         }
4539                 } else {
4540                         BUG_ON(allocated_clusters < reserved_clusters);
4541                         if (reserved_clusters < allocated_clusters) {
4542                                 struct ext4_inode_info *ei = EXT4_I(inode);
4543                                 int reservation = allocated_clusters -
4544                                                   reserved_clusters;
4545                                 /*
4546                                  * It seems we claimed a few clusters outside of
4547                                  * the range of this allocation. We should give
4548                                  * them back to the reservation pool. This can
4549                                  * happen in the following case:
4550                                  *
4551                                  * * Suppose s_cluster_ratio is 4 (i.e., each
4552                                  *   cluster has 4 blocks); thus, the clusters
4553                                  *   are [0-3],[4-7],[8-11]...
4554                                  * * First comes a delayed allocation write for
4555                                  *   logical blocks 10 & 11. Since there were no
4556                                  *   previous delayed allocated blocks in the
4557                                  *   range [8-11], we would reserve 1 cluster
4558                                  *   for this write.
4559                                  * * Next comes a write for logical blocks 3 to 8.
4560                                  *   In this case, we will reserve 2 clusters
4561                                  *   (for [0-3] and [4-7], and not for [8-11] as
4562                                  *   that range already has delayed allocated blocks).
4563                                  *   Thus the total reserved cluster count now becomes 3.
4564                                  * * Now, at delayed allocation writeout
4565                                  *   time, we will first write blocks [3-8] and
4566                                  *   allocate 3 clusters for writing these
4567                                  *   blocks. Also, we would claim all three of
4568                                  *   these clusters.
4569                                  * * Now when we come here to write out
4570                                  *   blocks [10-11], we would expect to claim
4571                                  *   the reservation of 1 cluster we had made
4572                                  *   (and we would claim it since there are no
4573                                  *   more delayed allocated blocks in the range
4574                                  *   [8-11]). But our reserved cluster count had
4575                                  *   already gone to 0.
4576                                  *
4577                                  *   Thus, at step 4 above, when we determine
4578                                  *   that there are still some unwritten delayed
4579                                  *   allocated blocks outside of our current
4580                                  *   block range, we should increment the
4581                                  *   reserved cluster count so that when the
4582                                  *   remaining blocks finally get written, we
4583                                  *   can claim them.
4584                                  */
4585                                 dquot_reserve_block(inode,
4586                                                 EXT4_C2B(sbi, reservation));
4587                                 spin_lock(&ei->i_block_reservation_lock);
4588                                 ei->i_reserved_data_blocks += reservation;
4589                                 spin_unlock(&ei->i_block_reservation_lock);
4590                         }
4591                         /*
4592                          * We will claim quota for all newly allocated blocks.
4593                          * We're updating the reserved space *after* the
4594                          * correction above so we do not accidentally free
4595                          * all the metadata reservation because we might
4596                          * actually need it later on.
4597                          */
4598                         ext4_da_update_reserve_space(inode, allocated_clusters,
4599                                                         1);
4600                 }
4601         }
4602
4603         /*
4604          * Cache the extent and update transaction to commit on fdatasync only
4605          * when it is _not_ an unwritten extent.
4606          */
4607         if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0)
4608                 ext4_update_inode_fsync_trans(handle, inode, 1);
4609         else
4610                 ext4_update_inode_fsync_trans(handle, inode, 0);
4611 out:
4612         if (allocated > map->m_len)
4613                 allocated = map->m_len;
4614         ext4_ext_show_leaf(inode, path);
4615         map->m_flags |= EXT4_MAP_MAPPED;
4616         map->m_pblk = newblock;
4617         map->m_len = allocated;
4618 out2:
4619         ext4_ext_drop_refs(path);
4620         kfree(path);
4621
4622         trace_ext4_ext_map_blocks_exit(inode, flags, map,
4623                                        err ? err : allocated);
4624         ext4_es_lru_add(inode);
4625         return err ? err : allocated;
4626 }
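
/*
 * Editorial sketch of the return convention above (not from the original
 * source).  A hypothetical caller walking a logical range might loop:
 *
 *        map.m_lblk = first;
 *        map.m_len = last - first + 1;
 *        while (map.m_len > 0) {
 *                ret = ext4_ext_map_blocks(handle, inode, &map, flags);
 *                if (ret < 0)
 *                        return ret;        (error)
 *                if (ret == 0)
 *                        break;             (hole: nothing mapped)
 *                map.m_lblk += ret;         (ret blocks now at map.m_pblk)
 *                map.m_len -= ret;
 *        }
 */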
4627
4628 void ext4_ext_truncate(handle_t *handle, struct inode *inode)
4629 {
4630         struct super_block *sb = inode->i_sb;
4631         ext4_lblk_t last_block;
4632         int err = 0;
4633
4634         /*
4635          * TODO: optimization is possible here.
4636          * Probably we need not scan at all,
4637          * because page truncation is enough.
4638          */
4639
4640         /* we have to know where to truncate from in the crash case */
4641         EXT4_I(inode)->i_disksize = inode->i_size;
4642         ext4_mark_inode_dirty(handle, inode);
4643
4644         last_block = (inode->i_size + sb->s_blocksize - 1)
4645                         >> EXT4_BLOCK_SIZE_BITS(sb);
4646 retry:
4647         err = ext4_es_remove_extent(inode, last_block,
4648                                     EXT_MAX_BLOCKS - last_block);
4649         if (err == -ENOMEM) {
4650                 cond_resched();
4651                 congestion_wait(BLK_RW_ASYNC, HZ/50);
4652                 goto retry;
4653         }
4654         if (err) {
4655                 ext4_std_error(inode->i_sb, err);
4656                 return;
4657         }
4658         err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
4659         ext4_std_error(inode->i_sb, err);
4660 }
4661
4662 static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
4663                                   ext4_lblk_t len, loff_t new_size,
4664                                   int flags, int mode)
4665 {
4666         struct inode *inode = file_inode(file);
4667         handle_t *handle;
4668         int ret = 0;
4669         int ret2 = 0;
4670         int retries = 0;
4671         struct ext4_map_blocks map;
4672         unsigned int credits;
4673         loff_t epos;
4674
4675         map.m_lblk = offset;
4676         map.m_len = len;
4677         /*
4678          * Don't normalize the request if it can fit in one extent so
4679          * that it doesn't get unnecessarily split into multiple
4680          * extents.
4681          */
4682         if (len <= EXT_UNWRITTEN_MAX_LEN)
4683                 flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
4684
4685         /*
4686          * credits to insert 1 extent into extent tree
4687          */
4688         credits = ext4_chunk_trans_blocks(inode, len);
4689
4690 retry:
4691         while (ret >= 0 && len) {
4692                 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
4693                                             credits);
4694                 if (IS_ERR(handle)) {
4695                         ret = PTR_ERR(handle);
4696                         break;
4697                 }
4698                 ret = ext4_map_blocks(handle, inode, &map, flags);
4699                 if (ret <= 0) {
4700                         ext4_debug("inode #%lu: block %u: len %u: "
4701                                    "ext4_ext_map_blocks returned %d",
4702                                    inode->i_ino, map.m_lblk,
4703                                    map.m_len, ret);
4704                         ext4_mark_inode_dirty(handle, inode);
4705                         ret2 = ext4_journal_stop(handle);
4706                         break;
4707                 }
4708                 map.m_lblk += ret;
4709                 map.m_len = len = len - ret;
4710                 epos = (loff_t)map.m_lblk << inode->i_blkbits;
4711                 inode->i_ctime = ext4_current_time(inode);
4712                 if (new_size) {
4713                         if (epos > new_size)
4714                                 epos = new_size;
4715                         if (ext4_update_inode_size(inode, epos) & 0x1)
4716                                 inode->i_mtime = inode->i_ctime;
4717                 } else {
4718                         if (epos > inode->i_size)
4719                                 ext4_set_inode_flag(inode,
4720                                                     EXT4_INODE_EOFBLOCKS);
4721                 }
4722                 ext4_mark_inode_dirty(handle, inode);
4723                 ret2 = ext4_journal_stop(handle);
4724                 if (ret2)
4725                         break;
4726         }
4727         if (ret == -ENOSPC &&
4728                         ext4_should_retry_alloc(inode->i_sb, &retries)) {
4729                 ret = 0;
4730                 goto retry;
4731         }
4732
4733         return ret > 0 ? ret2 : ret;
4734 }
4735
4736 static long ext4_zero_range(struct file *file, loff_t offset,
4737                             loff_t len, int mode)
4738 {
4739         struct inode *inode = file_inode(file);
4740         handle_t *handle = NULL;
4741         unsigned int max_blocks;
4742         loff_t new_size = 0;
4743         int ret = 0;
4744         int flags;
4745         int credits;
4746         int partial_begin, partial_end;
4747         loff_t start, end;
4748         ext4_lblk_t lblk;
4749         struct address_space *mapping = inode->i_mapping;
4750         unsigned int blkbits = inode->i_blkbits;
4751
4752         trace_ext4_zero_range(inode, offset, len, mode);
4753
4754         if (!S_ISREG(inode->i_mode))
4755                 return -EINVAL;
4756
4757         /* Call ext4_force_commit to flush all data in case of data=journal. */
4758         if (ext4_should_journal_data(inode)) {
4759                 ret = ext4_force_commit(inode->i_sb);
4760                 if (ret)
4761                         return ret;
4762         }
4763
4764         /*
4765          * Write out all dirty pages to avoid race conditions,
4766          * then release them.
4767          */
4768         if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
4769                 ret = filemap_write_and_wait_range(mapping, offset,
4770                                                    offset + len - 1);
4771                 if (ret)
4772                         return ret;
4773         }
4774
4775         /*
4776          * Round up offset. This is not fallocate, we need to zero out
4777          * blocks, so convert interior block aligned part of the range to
4778          * unwritten and possibly manually zero out unaligned parts of the
4779          * range.
4780          */
4781         start = round_up(offset, 1 << blkbits);
4782         end = round_down((offset + len), 1 << blkbits);
4783
4784         if (start < offset || end > offset + len)
4785                 return -EINVAL;
4786         partial_begin = offset & ((1 << blkbits) - 1);
4787         partial_end = (offset + len) & ((1 << blkbits) - 1);
4788
4789         lblk = start >> blkbits;
4790         max_blocks = (end >> blkbits);
4791         if (max_blocks < lblk)
4792                 max_blocks = 0;
4793         else
4794                 max_blocks -= lblk;
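
        /*
         * Editorial worked example (hypothetical numbers): with blkbits = 12
         * (4k blocks), offset = 3000 and len = 10000, the byte range is
         * [3000, 13000): start = round_up(3000, 4096) = 4096, end =
         * round_down(13000, 4096) = 12288, partial_begin = 3000, partial_end
         * = 712, lblk = 1 and max_blocks = 2.  Blocks 1-2 get converted to
         * unwritten; the partial head and tail are zeroed out by
         * ext4_zero_partial_blocks() at the end of this function.
         */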
4795
4796         flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT |
4797                 EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
4798                 EXT4_EX_NOCACHE;
4799         if (mode & FALLOC_FL_KEEP_SIZE)
4800                 flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
4801
4802         mutex_lock(&inode->i_mutex);
4803
4804         /*
4805          * Indirect files do not support unwritten extents
4806          */
4807         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4808                 ret = -EOPNOTSUPP;
4809                 goto out_mutex;
4810         }
4811
4812         if (!(mode & FALLOC_FL_KEEP_SIZE) &&
4813              offset + len > i_size_read(inode)) {
4814                 new_size = offset + len;
4815                 ret = inode_newsize_ok(inode, new_size);
4816                 if (ret)
4817                         goto out_mutex;
4818                 /*
4819                  * If we have a partial block after EOF we have to allocate
4820                  * the entire block.
4821                  */
4822                 if (partial_end)
4823                         max_blocks += 1;
4824         }
4825
4826         if (max_blocks > 0) {
4827
4828                 /* Now release the pages and zero the block-aligned part of the pages */
4829                 truncate_pagecache_range(inode, start, end - 1);
4830                 inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4831
4832                 /* Wait for all existing dio workers; newcomers will block on i_mutex */
4833                 ext4_inode_block_unlocked_dio(inode);
4834                 inode_dio_wait(inode);
4835
4836                 ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
4837                                              flags, mode);
4838                 if (ret)
4839                         goto out_dio;
4840                 /*
4841                  * Remove entire range from the extent status tree.
4842                  *
4843                  * ext4_es_remove_extent(inode, lblk, max_blocks) is
4844                  * NOT sufficient.  I'm not sure why this is the case,
4845                  * but let's be conservative and remove the extent
4846                  * status tree for the entire inode.  There should be
4847                  * no outstanding delalloc extents thanks to the
4848                  * filemap_write_and_wait_range() call above.
4849                  */
4850                 ret = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
4851                 if (ret)
4852                         goto out_dio;
4853         }
4854         if (!partial_begin && !partial_end)
4855                 goto out_dio;
4856
4857         /*
4858          * In the worst case we have to write out two non-adjacent unwritten
4859          * blocks and update the inode.
4860          */
4861         credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1;
4862         if (ext4_should_journal_data(inode))
4863                 credits += 2;
4864         handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
4865         if (IS_ERR(handle)) {
4866                 ret = PTR_ERR(handle);
4867                 ext4_std_error(inode->i_sb, ret);
4868                 goto out_dio;
4869         }
4870
4871         inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4872         if (new_size) {
4873                 ext4_update_inode_size(inode, new_size);
4874         } else {
4875                 /*
4876                  * Mark that we allocate beyond EOF so the subsequent truncate
4877                  * can proceed even if the new size is the same as i_size.
4878                  */
4879                 if ((offset + len) > i_size_read(inode))
4880                         ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
4881         }
4882         ext4_mark_inode_dirty(handle, inode);
4883
4884         /* Zero out partial block at the edges of the range */
4885         ret = ext4_zero_partial_blocks(handle, inode, offset, len);
4886
4887         if (file->f_flags & O_SYNC)
4888                 ext4_handle_sync(handle);
4889
4890         ext4_journal_stop(handle);
4891 out_dio:
4892         ext4_inode_resume_unlocked_dio(inode);
4893 out_mutex:
4894         mutex_unlock(&inode->i_mutex);
4895         return ret;
4896 }
4897
4898 /*
4899  * Preallocate space for a file. This implements ext4's fallocate file
4900  * operation, which gets called from the sys_fallocate system call.
4901  * For block-mapped files, posix_fallocate should fall back to the method
4902  * of writing zeroes to the required new blocks (the same behavior which is
4903  * expected for file systems which do not support the fallocate() system call).
4904  */
4905 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4906 {
4907         struct inode *inode = file_inode(file);
4908         loff_t new_size = 0;
4909         unsigned int max_blocks;
4910         int ret = 0;
4911         int flags;
4912         ext4_lblk_t lblk;
4913         unsigned int blkbits = inode->i_blkbits;
4914
4915         /* Return error if mode is not supported */
4916         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
4917                      FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE))
4918                 return -EOPNOTSUPP;
4919
4920         if (mode & FALLOC_FL_PUNCH_HOLE)
4921                 return ext4_punch_hole(inode, offset, len);
4922
4923         ret = ext4_convert_inline_data(inode);
4924         if (ret)
4925                 return ret;
4926
4927         /*
4928          * currently supporting (pre)allocate mode for extent-based
4929          * files _only_
4930          */
4931         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4932                 return -EOPNOTSUPP;
4933
4934         if (mode & FALLOC_FL_COLLAPSE_RANGE)
4935                 return ext4_collapse_range(inode, offset, len);
4936
4937         if (mode & FALLOC_FL_ZERO_RANGE)
4938                 return ext4_zero_range(file, offset, len, mode);
4939
4940         trace_ext4_fallocate_enter(inode, offset, len, mode);
4941         lblk = offset >> blkbits;
4942         /*
4943          * We can't just convert len to max_blocks because, e.g., if
4944          * blocksize = 4096, offset = 3072 and len = 2048, two blocks are covered.
4945          */
4946         max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
4947                 - lblk;
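
        /*
         * Editorial worked example for the case mentioned above: with
         * blocksize = 4096 (blkbits = 12), offset = 3072 and len = 2048,
         * lblk = 3072 >> 12 = 0 and EXT4_BLOCK_ALIGN(5120, 12) >> 12 = 2,
         * so max_blocks = 2 even though len >> blkbits would be 0; the byte
         * range [3072, 5120) really does touch two blocks.
         */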
4948
4949         flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
4950         if (mode & FALLOC_FL_KEEP_SIZE)
4951                 flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
4952
4953         mutex_lock(&inode->i_mutex);
4954
4955         if (!(mode & FALLOC_FL_KEEP_SIZE) &&
4956              offset + len > i_size_read(inode)) {
4957                 new_size = offset + len;
4958                 ret = inode_newsize_ok(inode, new_size);
4959                 if (ret)
4960                         goto out;
4961         }
4962
4963         ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
4964                                      flags, mode);
4965         if (ret)
4966                 goto out;
4967
4968         if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
4969                 ret = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal,
4970                                                 EXT4_I(inode)->i_sync_tid);
4971         }
4972 out:
4973         mutex_unlock(&inode->i_mutex);
4974         trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
4975         return ret;
4976 }
4977
4978 /*
4979  * This function converts a range of blocks to written extents.
4980  * The caller of this function will pass the start offset and the size;
4981  * all unwritten extents within this range will be converted to
4982  * written extents.
4983  *
4984  * This function is called from the direct IO end_io callback
4985  * function, to convert the fallocated extents after IO is completed.
4986  * Returns 0 on success.
4987  */
4988 int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
4989                                    loff_t offset, ssize_t len)
4990 {
4991         unsigned int max_blocks;
4992         int ret = 0;
4993         int ret2 = 0;
4994         struct ext4_map_blocks map;
4995         unsigned int credits, blkbits = inode->i_blkbits;
4996
4997         map.m_lblk = offset >> blkbits;
4998         /*
4999          * We can't just convert len to max_blocks because, e.g., if
5000          * blocksize = 4096, offset = 3072 and len = 2048, two blocks are covered.
5001          */
5002         max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
5003                       map.m_lblk);
5004         /*
5005          * This is somewhat ugly but the idea is clear: when a transaction is
5006          * reserved, everything goes into it. Otherwise we'd rather start several
5007          * smaller transactions for the conversion of each extent separately.
5008          */
5009         if (handle) {
5010                 handle = ext4_journal_start_reserved(handle,
5011                                                      EXT4_HT_EXT_CONVERT);
5012                 if (IS_ERR(handle))
5013                         return PTR_ERR(handle);
5014                 credits = 0;
5015         } else {
5016                 /*
5017                  * credits to insert 1 extent into extent tree
5018                  */
5019                 credits = ext4_chunk_trans_blocks(inode, max_blocks);
5020         }
5021         while (ret >= 0 && ret < max_blocks) {
5022                 map.m_lblk += ret;
5023                 map.m_len = (max_blocks -= ret);
5024                 if (credits) {
5025                         handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
5026                                                     credits);
5027                         if (IS_ERR(handle)) {
5028                                 ret = PTR_ERR(handle);
5029                                 break;
5030                         }
5031                 }
5032                 ret = ext4_map_blocks(handle, inode, &map,
5033                                       EXT4_GET_BLOCKS_IO_CONVERT_EXT);
5034                 if (ret <= 0)
5035                         ext4_warning(inode->i_sb,
5036                                      "inode #%lu: block %u: len %u: "
5037                                      "ext4_ext_map_blocks returned %d",
5038                                      inode->i_ino, map.m_lblk,
5039                                      map.m_len, ret);
5040                 ext4_mark_inode_dirty(handle, inode);
5041                 if (credits)
5042                         ret2 = ext4_journal_stop(handle);
5043                 if (ret <= 0 || ret2)
5044                         break;
5045         }
5046         if (!credits)
5047                 ret2 = ext4_journal_stop(handle);
5048         return ret > 0 ? ret2 : ret;
5049 }
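
/*
 * Editorial note on the two strategies above (hypothetical numbers):
 * converting three separate unwritten extents with a reserved handle
 * journals all three conversions in that single transaction, whose credits
 * were set aside in advance; without one, the loop starts and stops a small
 * ext4_chunk_trans_blocks()-sized transaction per ext4_map_blocks() call,
 * bounding how long any single handle is held.
 */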
5050
5051 /*
5052  * If newes is not an existing extent (newes->es_pblk equals zero), find
5053  * the delayed extent at the start of newes, update newes accordingly, and
5054  * return the start of the next delayed extent.
5055  *
5056  * If newes is an existing extent (newes->es_pblk is not equal to zero),
5057  * return the start of the next delayed extent, or EXT_MAX_BLOCKS if no
5058  * delayed extent is found. Leave newes unmodified.
5059  */
5060 static int ext4_find_delayed_extent(struct inode *inode,
5061                                     struct extent_status *newes)
5062 {
5063         struct extent_status es;
5064         ext4_lblk_t block, next_del;
5065
5066         if (newes->es_pblk == 0) {
5067                 ext4_es_find_delayed_extent_range(inode, newes->es_lblk,
5068                                 newes->es_lblk + newes->es_len - 1, &es);
5069
5070                 /*
5071                  * No extent in the extent tree contains the queried block,
5072                  * so the block may lie in 1) a hole or 2) a delayed extent.
5073                  */
5074                 if (es.es_len == 0)
5075                         /* A hole found. */
5076                         return 0;
5077
5078                 if (es.es_lblk > newes->es_lblk) {
5079                         /* A hole found. */
5080                         newes->es_len = min(es.es_lblk - newes->es_lblk,
5081                                             newes->es_len);
5082                         return 0;
5083                 }
5084
5085                 newes->es_len = es.es_lblk + es.es_len - newes->es_lblk;
5086         }
5087
5088         block = newes->es_lblk + newes->es_len;
5089         ext4_es_find_delayed_extent_range(inode, block, EXT_MAX_BLOCKS, &es);
5090         if (es.es_len == 0)
5091                 next_del = EXT_MAX_BLOCKS;
5092         else
5093                 next_del = es.es_lblk;
5094
5095         return next_del;
5096 }
5097 /* fiemap flags we can handle are specified here */
5098 #define EXT4_FIEMAP_FLAGS       (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
5099
5100 static int ext4_xattr_fiemap(struct inode *inode,
5101                                 struct fiemap_extent_info *fieinfo)
5102 {
5103         __u64 physical = 0;
5104         __u64 length;
5105         __u32 flags = FIEMAP_EXTENT_LAST;
5106         int blockbits = inode->i_sb->s_blocksize_bits;
5107         int error = 0;
5108
5109         /* in-inode? */
5110         if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
5111                 struct ext4_iloc iloc;
5112                 int offset;     /* offset of xattr in inode */
5113
5114                 error = ext4_get_inode_loc(inode, &iloc);
5115                 if (error)
5116                         return error;
5117                 physical = (__u64)iloc.bh->b_blocknr << blockbits;
5118                 offset = EXT4_GOOD_OLD_INODE_SIZE +
5119                                 EXT4_I(inode)->i_extra_isize;
5120                 physical += offset;
5121                 length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
5122                 flags |= FIEMAP_EXTENT_DATA_INLINE;
5123                 brelse(iloc.bh);
5124         } else { /* external block */
5125                 physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
5126                 length = inode->i_sb->s_blocksize;
5127         }
5128
5129         if (physical)
5130                 error = fiemap_fill_next_extent(fieinfo, 0, physical,
5131                                                 length, flags);
5132         return (error < 0 ? error : 0);
5133 }
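
/*
 * Editorial worked example for the in-inode case above (hypothetical
 * numbers): with a 256-byte on-disk inode, i_extra_isize = 32 and
 * iloc.bh->b_blocknr = 100 on a 4k filesystem, offset =
 * EXT4_GOOD_OLD_INODE_SIZE + 32 = 160, physical = 100 * 4096 + 160 and
 * length = 256 - 160 = 96: the xattr data occupies the last 96 bytes of
 * the inode and is reported with FIEMAP_EXTENT_DATA_INLINE.
 */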
5134
5135 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
5136                 __u64 start, __u64 len)
5137 {
5138         ext4_lblk_t start_blk;
5139         int error = 0;
5140
5141         if (ext4_has_inline_data(inode)) {
5142                 int has_inline = 1;
5143
5144                 error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline);
5145
5146                 if (has_inline)
5147                         return error;
5148         }
5149
5150         if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
5151                 error = ext4_ext_precache(inode);
5152                 if (error)
5153                         return error;
5154         }
5155
5156         /* fall back to generic here if not in extents format */
5157         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
5158                 return generic_block_fiemap(inode, fieinfo, start, len,
5159                         ext4_get_block);
5160
5161         if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
5162                 return -EBADR;
5163
5164         if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
5165                 error = ext4_xattr_fiemap(inode, fieinfo);
5166         } else {
5167                 ext4_lblk_t len_blks;
5168                 __u64 last_blk;
5169
5170                 start_blk = start >> inode->i_sb->s_blocksize_bits;
5171                 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
5172                 if (last_blk >= EXT_MAX_BLOCKS)
5173                         last_blk = EXT_MAX_BLOCKS-1;
5174                 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
5175
5176                 /*
5177                  * Walk the extent tree gathering extent information
5178                  * and pushing extents back to the user.
5179                  */
5180                 error = ext4_fill_fiemap_extents(inode, start_blk,
5181                                                  len_blks, fieinfo);
5182         }
5183         ext4_es_lru_add(inode);
5184         return error;
5185 }
5186
5187 /*
5188  * ext4_access_path:
5189  * Function to access the path buffer for marking it dirty.
5190  * It also checks if there are sufficient credits left in the journal handle
5191  * to update the path.
5192  */
5193 static int
5194 ext4_access_path(handle_t *handle, struct inode *inode,
5195                 struct ext4_ext_path *path)
5196 {
5197         int credits, err;
5198
5199         if (!ext4_handle_valid(handle))
5200                 return 0;
5201
5202         /*
5203          * Check if we need to extend the journal credits:
5204          * 3 for the leaf, sb, and inode, plus 2 (block bitmap and group
5205          * descriptor) for each block group; assume two block
5206          * groups.
5207          */
5208         if (handle->h_buffer_credits < 7) {
5209                 credits = ext4_writepage_trans_blocks(inode);
5210                 err = ext4_ext_truncate_extend_restart(handle, inode, credits);
5211                 /* EAGAIN is success */
5212                 if (err && err != -EAGAIN)
5213                         return err;
5214         }
5215
5216         err = ext4_ext_get_access(handle, inode, path);
5217         return err;
5218 }
5219
5220 /*
5221  * ext4_ext_shift_path_extents:
5222  * Shift the extents of a path structure lying between path[depth].p_ext
5223  * and EXT_LAST_EXTENT(path[depth].p_hdr) downwards, by subtracting shift
5224  * from the starting block of each extent.
5225  */
5226 static int
5227 ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
5228                             struct inode *inode, handle_t *handle,
5229                             ext4_lblk_t *start)
5230 {
5231         int depth, err = 0;
5232         struct ext4_extent *ex_start, *ex_last;
5233         bool update = false;
5234         depth = path->p_depth;
5235
5236         while (depth >= 0) {
5237                 if (depth == path->p_depth) {
5238                         ex_start = path[depth].p_ext;
5239                         if (!ex_start)
5240                                 return -EIO;
5241
5242                         ex_last = EXT_LAST_EXTENT(path[depth].p_hdr);
5243                         if (!ex_last)
5244                                 return -EIO;
5245
5246                         err = ext4_access_path(handle, inode, path + depth);
5247                         if (err)
5248                                 goto out;
5249
5250                         if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr))
5251                                 update = true;
5252
5253                         *start = le32_to_cpu(ex_last->ee_block) +
5254                                 ext4_ext_get_actual_len(ex_last);
5255
5256                         while (ex_start <= ex_last) {
5257                                 le32_add_cpu(&ex_start->ee_block, -shift);
5258                                 /* Try to merge to the left. */
5259                                 if ((ex_start >
5260                                      EXT_FIRST_EXTENT(path[depth].p_hdr)) &&
5261                                     ext4_ext_try_to_merge_right(inode,
5262                                                         path, ex_start - 1))
5263                                         ex_last--;
5264                                 else
5265                                         ex_start++;
5266                         }
5267                         err = ext4_ext_dirty(handle, inode, path + depth);
5268                         if (err)
5269                                 goto out;
5270
5271                         if (--depth < 0 || !update)
5272                                 break;
5273                 }
5274
5275                 /* Update index too */
5276                 err = ext4_access_path(handle, inode, path + depth);
5277                 if (err)
5278                         goto out;
5279
5280                 le32_add_cpu(&path[depth].p_idx->ei_block, -shift);
5281                 err = ext4_ext_dirty(handle, inode, path + depth);
5282                 if (err)
5283                         goto out;
5284
5285                 /* We are done if the current index is not the first index in its node */
5286                 if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr))
5287                         break;
5288
5289                 depth--;
5290         }
5291
5292 out:
5293         return err;
5294 }
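/*
 * To make the shift-and-merge loop above concrete, here is a small
 * standalone userspace model (an illustrative sketch, not kernel code:
 * struct ext and shift_tail_and_merge() are simplified stand-ins).  It
 * shifts the logical start of a tail of extents downwards and folds an
 * extent into its left neighbour once the two become both logically and
 * physically contiguous, mirroring the ex_start/ex_last walk above.
 */
#include <stdio.h>

struct ext {			/* simplified stand-in for struct ext4_extent */
	unsigned int lblk;	/* logical start block */
	unsigned int pblk;	/* physical start block */
	unsigned int len;	/* length in blocks */
};

/* Shift ex[first..n-1] down by 'shift' blocks, then merge left where possible. */
static int shift_tail_and_merge(struct ext *ex, int n, int first,
				unsigned int shift)
{
	int i, j;

	for (i = first; i < n; i++)
		ex[i].lblk -= shift;	/* the le32_add_cpu(..., -shift) step */

	/* Merge pass, analogous to ext4_ext_try_to_merge_right(). */
	for (i = j = 0; i < n; i++) {
		if (j > 0 && ex[j - 1].lblk + ex[j - 1].len == ex[i].lblk &&
		    ex[j - 1].pblk + ex[j - 1].len == ex[i].pblk)
			ex[j - 1].len += ex[i].len;	/* contiguous: absorb */
		else
			ex[j++] = ex[i];
	}
	return j;			/* new extent count */
}

int main(void)
{
	/* blocks 10..19 and, after a ten-block hole, blocks 30..34 */
	struct ext ex[] = { { 10, 100, 10 }, { 30, 110, 5 } };
	int i, n = shift_tail_and_merge(ex, 2, 1, 10);

	for (i = 0; i < n; i++)	/* prints one merged extent: 10/100/15 */
		printf("lblk %u pblk %u len %u\n",
		       ex[i].lblk, ex[i].pblk, ex[i].len);
	return 0;
}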
5295
5296 /*
5297  * ext4_ext_shift_extents:
5298  * All extents that lie in the range from start to the last allocated
5299  * block of the file are shifted downwards by shift blocks.
5300  * Returns 0 on success, an error code otherwise.
5301  */
5302 static int
5303 ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
5304                        ext4_lblk_t start, ext4_lblk_t shift)
5305 {
5306         struct ext4_ext_path *path;
5307         int ret = 0, depth;
5308         struct ext4_extent *extent;
5309         ext4_lblk_t stop_block;
5310         ext4_lblk_t ex_start, ex_end;
5311
5312         /* Let path point to the last extent */
5313         path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 0);
5314         if (IS_ERR(path))
5315                 return PTR_ERR(path);
5316
5317         depth = path->p_depth;
5318         extent = path[depth].p_ext;
5319         if (!extent)
5320                 goto out;
5321
5322         stop_block = le32_to_cpu(extent->ee_block) +
5323                         ext4_ext_get_actual_len(extent);
5324
5325         /* Nothing to shift if the hole is at the end of the file */
5326         if (start >= stop_block)
5327                 goto out;
5328
5329         /*
5330          * Don't start shifting extents until we make sure the hole is big
5331          * enough to accommodate the shift.
5332          */
5333         path = ext4_find_extent(inode, start - 1, &path, 0);
5334         if (IS_ERR(path))
5335                 return PTR_ERR(path);
5336         depth = path->p_depth;
5337         extent =  path[depth].p_ext;
5338         if (extent) {
5339                 ex_start = le32_to_cpu(extent->ee_block);
5340                 ex_end = le32_to_cpu(extent->ee_block) +
5341                         ext4_ext_get_actual_len(extent);
5342         } else {
5343                 ex_start = 0;
5344                 ex_end = 0;
5345         }
5346
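        /*
         * Worked example (assumed numbers, purely illustrative): if the
         * extent before the hole ends at block ex_end = 20 and the hole
         * begins at start = 30, the hole spans 10 blocks; any shift > 10
         * would slide the shifted extents on top of their predecessor,
         * which is why it is rejected below with -EINVAL.
         */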
5347         if ((start == ex_start && shift > ex_start) ||
5348             (shift > start - ex_end))
5349                 return -EINVAL;
5350
5351         /* It's safe to start updating extents */
5352         while (start < stop_block) {
5353                 path = ext4_find_extent(inode, start, &path, 0);
5354                 if (IS_ERR(path))
5355                         return PTR_ERR(path);
5356                 depth = path->p_depth;
5357                 extent = path[depth].p_ext;
5358                 if (!extent) {
5359                         EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
5360                                          (unsigned long) start);
5361                         return -EIO;
5362                 }
5363                 if (start > le32_to_cpu(extent->ee_block)) {
5364                         /* Hole, move to the next extent */
5365                         if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) {
5366                                 path[depth].p_ext++;
5367                         } else {
5368                                 start = ext4_ext_next_allocated_block(path);
5369                                 continue;
5370                         }
5371                 }
5372                 ret = ext4_ext_shift_path_extents(path, shift, inode,
5373                                 handle, &start);
5374                 if (ret)
5375                         break;
5376         }
5377 out:
5378         ext4_ext_drop_refs(path);
5379         kfree(path);
5380         return ret;
5381 }
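/*
 * Illustrative trace of ext4_ext_shift_extents() (assumed layout): with
 * extents covering blocks 10..19 and 30..34, start = 30 and shift = 10,
 * stop_block is 35.  The sanity check passes (the hole 20..29 is exactly
 * ten blocks), the loop looks up the extent at block 30, and
 * ext4_ext_shift_path_extents() moves it to block 20, merging it into its
 * left neighbour when the two are also physically contiguous.
 */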
5382
5383 /*
5384  * ext4_collapse_range:
5385  * This implements fallocate's collapse range functionality for ext4.
5386  * Returns: 0 on success, non-zero on error.
5387  */
5388 int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
5389 {
5390         struct super_block *sb = inode->i_sb;
5391         ext4_lblk_t punch_start, punch_stop;
5392         handle_t *handle;
5393         unsigned int credits;
5394         loff_t new_size, ioffset;
5395         int ret;
5396
5397         /* Collapse range works only on fs cluster size aligned offsets. */
5398         if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) ||
5399             len & (EXT4_CLUSTER_SIZE(sb) - 1))
5400                 return -EINVAL;
5401
5402         if (!S_ISREG(inode->i_mode))
5403                 return -EINVAL;
5404
5405         trace_ext4_collapse_range(inode, offset, len);
5406
5407         punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
5408         punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);
5409
5410         /* Call ext4_force_commit to flush all data in case of data=journal. */
5411         if (ext4_should_journal_data(inode)) {
5412                 ret = ext4_force_commit(inode->i_sb);
5413                 if (ret)
5414                         return ret;
5415         }
5416
5417         /*
5418          * Round the offset down to a page size boundary, for the case
5419          * where the page size is larger than the block size.
5420          */
5421         ioffset = round_down(offset, PAGE_SIZE);
5422
5423         /* Write out all dirty pages */
5424         ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
5425                                            LLONG_MAX);
5426         if (ret)
5427                 return ret;
5428
5429         /* Take mutex lock */
5430         mutex_lock(&inode->i_mutex);
5431
5432         /*
5433          * The collapse range must not reach or overlap EOF, since that
5434          * would effectively make it a truncate operation; reject it.
5435          */
5436         if (offset + len >= i_size_read(inode)) {
5437                 ret = -EINVAL;
5438                 goto out_mutex;
5439         }
5440
5441         /* Currently just for extent-based files */
5442         if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
5443                 ret = -EOPNOTSUPP;
5444                 goto out_mutex;
5445         }
5446
5447         truncate_pagecache(inode, ioffset);
5448
5449         /* Wait for existing dio to complete */
5450         ext4_inode_block_unlocked_dio(inode);
5451         inode_dio_wait(inode);
5452
5453         credits = ext4_writepage_trans_blocks(inode);
5454         handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
5455         if (IS_ERR(handle)) {
5456                 ret = PTR_ERR(handle);
5457                 goto out_dio;
5458         }
5459
5460         down_write(&EXT4_I(inode)->i_data_sem);
5461         ext4_discard_preallocations(inode);
5462
5463         ret = ext4_es_remove_extent(inode, punch_start,
5464                                     EXT_MAX_BLOCKS - punch_start);
5465         if (ret) {
5466                 up_write(&EXT4_I(inode)->i_data_sem);
5467                 goto out_stop;
5468         }
5469
5470         ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
5471         if (ret) {
5472                 up_write(&EXT4_I(inode)->i_data_sem);
5473                 goto out_stop;
5474         }
5475         ext4_discard_preallocations(inode);
5476
5477         ret = ext4_ext_shift_extents(inode, handle, punch_stop,
5478                                      punch_stop - punch_start);
5479         if (ret) {
5480                 up_write(&EXT4_I(inode)->i_data_sem);
5481                 goto out_stop;
5482         }
5483
5484         new_size = i_size_read(inode) - len;
5485         i_size_write(inode, new_size);
5486         EXT4_I(inode)->i_disksize = new_size;
5487
5488         up_write(&EXT4_I(inode)->i_data_sem);
5489         if (IS_SYNC(inode))
5490                 ext4_handle_sync(handle);
5491         inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
5492         ext4_mark_inode_dirty(handle, inode);
5493
5494 out_stop:
5495         ext4_journal_stop(handle);
5496 out_dio:
5497         ext4_inode_resume_unlocked_dio(inode);
5498 out_mutex:
5499         mutex_unlock(&inode->i_mutex);
5500         return ret;
5501 }
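/*
 * The path above is reached from userspace through fallocate(2).  Below is
 * a minimal, illustrative sketch (error handling mostly omitted, file name
 * hypothetical): collapsing out the second 4 KiB block of a file on a
 * 4 KiB-block filesystem, so punch_start = 1 and punch_stop = 2, all later
 * data moves down by one block, and the file shrinks by 4 KiB.
 */
#define _GNU_SOURCE		/* for fallocate() */
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>
#include <linux/falloc.h>	/* FALLOC_FL_COLLAPSE_RANGE */

int main(void)
{
	int fd = open("testfile", O_RDWR);

	if (fd < 0)
		return 1;
	/* offset and len must both be aligned to the fs cluster size */
	if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 4096, 4096) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}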
5502
5503 /**
5504  * ext4_swap_extents - Swap extents between two inodes
5505  *
5506  * @inode1:     First inode
5507  * @inode2:     Second inode
5508  * @lblk1:      Start block for first inode
5509  * @lblk2:      Start block for second inode
5510  * @count:      Number of blocks to swap
5511  * @unwritten:  Mark second inode's extents as unwritten after swap
5512  * @erp:        Pointer to save error value
5513  *
5514  * This helper routine does exactly what its name promises: it swaps extents.
5515  * Everything else, such as page-cache locking consistency, bh mapping
5516  * consistency, and copying of extent data, must be handled by the caller.
5517  * Locking:
5518  *              i_mutex is held for both inodes
5519  *              i_data_sem is locked for write for both inodes
5520  * Assumptions:
5521  *              All pages from requested range are locked for both inodes
5522  */
5523 int
5524 ext4_swap_extents(handle_t *handle, struct inode *inode1,
5525                   struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2,
5526                   ext4_lblk_t count, int unwritten, int *erp)
5527 {
5528         struct ext4_ext_path *path1 = NULL;
5529         struct ext4_ext_path *path2 = NULL;
5530         int replaced_count = 0;
5531
5532         BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem));
5533         BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem));
5534         BUG_ON(!mutex_is_locked(&inode1->i_mutex));
5535         BUG_ON(!mutex_is_locked(&inode2->i_mutex));
5536
5537         *erp = ext4_es_remove_extent(inode1, lblk1, count);
5538         if (unlikely(*erp))
5539                 return 0;
5540         *erp = ext4_es_remove_extent(inode2, lblk2, count);
5541         if (unlikely(*erp))
5542                 return 0;
5543
5544         while (count) {
5545                 struct ext4_extent *ex1, *ex2, tmp_ex;
5546                 ext4_lblk_t e1_blk, e2_blk;
5547                 int e1_len, e2_len, len;
5548                 int split = 0;
5549
5550                 path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE);
5551                 if (unlikely(IS_ERR(path1))) {
5552                         *erp = PTR_ERR(path1);
5553                         path1 = NULL;
5554                 finish:
5555                         count = 0;
5556                         goto repeat;
5557                 }
5558                 path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE);
5559                 if (unlikely(IS_ERR(path2))) {
5560                         *erp = PTR_ERR(path2);
5561                         path2 = NULL;
5562                         goto finish;
5563                 }
5564                 ex1 = path1[path1->p_depth].p_ext;
5565                 ex2 = path2[path2->p_depth].p_ext;
5566                 /* Do we have something to swap? */
5567                 if (unlikely(!ex2 || !ex1))
5568                         goto finish;
5569
5570                 e1_blk = le32_to_cpu(ex1->ee_block);
5571                 e2_blk = le32_to_cpu(ex2->ee_block);
5572                 e1_len = ext4_ext_get_actual_len(ex1);
5573                 e2_len = ext4_ext_get_actual_len(ex2);
5574
5575                 /* Hole handling */
5576                 if (!in_range(lblk1, e1_blk, e1_len) ||
5577                     !in_range(lblk2, e2_blk, e2_len)) {
5578                         ext4_lblk_t next1, next2;
5579
5580                         /* If the hole is after the extent, go to the next extent */
5581                         next1 = ext4_ext_next_allocated_block(path1);
5582                         next2 = ext4_ext_next_allocated_block(path2);
5583                         /* If hole before extent, then shift to that extent */
5584                         if (e1_blk > lblk1)
5585                                 next1 = e1_blk;
5586                         if (e2_blk > lblk2)
5587                                 next2 = e2_blk;
5588                         /* Do we have something to swap? */
5589                         if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS)
5590                                 goto finish;
5591                         /* Move to the rightmost boundary */
5592                         len = next1 - lblk1;
5593                         if (len < next2 - lblk2)
5594                                 len = next2 - lblk2;
5595                         if (len > count)
5596                                 len = count;
5597                         lblk1 += len;
5598                         lblk2 += len;
5599                         count -= len;
5600                         goto repeat;
5601                 }
5602
5603                 /* Prepare left boundary */
5604                 if (e1_blk < lblk1) {
5605                         split = 1;
5606                         *erp = ext4_force_split_extent_at(handle, inode1,
5607                                                 &path1, lblk1, 0);
5608                         if (unlikely(*erp))
5609                                 goto finish;
5610                 }
5611                 if (e2_blk < lblk2) {
5612                         split = 1;
5613                         *erp = ext4_force_split_extent_at(handle, inode2,
5614                                                 &path2,  lblk2, 0);
5615                         if (unlikely(*erp))
5616                                 goto finish;
5617                 }
5618                 /* ext4_split_extent_at() may result in a leaf extent split,
5619                  * so the paths must be revalidated. */
5620                 if (split)
5621                         goto repeat;
5622
5623                 /* Prepare right boundary */
5624                 len = count;
5625                 if (len > e1_blk + e1_len - lblk1)
5626                         len = e1_blk + e1_len - lblk1;
5627                 if (len > e2_blk + e2_len - lblk2)
5628                         len = e2_blk + e2_len - lblk2;
5629
5630                 if (len != e1_len) {
5631                         split = 1;
5632                         *erp = ext4_force_split_extent_at(handle, inode1,
5633                                                 &path1, lblk1 + len, 0);
5634                         if (unlikely(*erp))
5635                                 goto finish;
5636                 }
5637                 if (len != e2_len) {
5638                         split = 1;
5639                         *erp = ext4_force_split_extent_at(handle, inode2,
5640                                                 &path2, lblk2 + len, 0);
5641                         if (*erp)
5642                                 goto finish;
5643                 }
5644                 /* ext4_split_extent_at() may result in a leaf extent split,
5645                  * so the paths must be revalidated. */
5646                 if (split)
5647                         goto repeat;
5648
5649                 BUG_ON(e2_len != e1_len);
5650                 *erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
5651                 if (unlikely(*erp))
5652                         goto finish;
5653                 *erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
5654                 if (unlikely(*erp))
5655                         goto finish;
5656
5657                 /* Both extents are fully inside the boundaries. Swap them now */
5658                 tmp_ex = *ex1;
5659                 ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2));
5660                 ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex));
5661                 ex1->ee_len = cpu_to_le16(e2_len);
5662                 ex2->ee_len = cpu_to_le16(e1_len);
5663                 if (unwritten)
5664                         ext4_ext_mark_unwritten(ex2);
5665                 if (ext4_ext_is_unwritten(&tmp_ex))
5666                         ext4_ext_mark_unwritten(ex1);
5667
5668                 ext4_ext_try_to_merge(handle, inode2, path2, ex2);
5669                 ext4_ext_try_to_merge(handle, inode1, path1, ex1);
5670                 *erp = ext4_ext_dirty(handle, inode2, path2 +
5671                                       path2->p_depth);
5672                 if (unlikely(*erp))
5673                         goto finish;
5674                 *erp = ext4_ext_dirty(handle, inode1, path1 +
5675                                       path1->p_depth);
5676                 /*
5677                  * Looks scary, eh? The second inode already points to the new
5678                  * blocks and has been successfully dirtied, but luckily an error
5679                  * here can only be a journal error, in which case the whole
5680                  * transaction will be aborted anyway.
5681                  */
5682                 if (unlikely(*erp))
5683                         goto finish;
5684                 lblk1 += len;
5685                 lblk2 += len;
5686                 replaced_count += len;
5687                 count -= len;
5688
5689         repeat:
5690                 ext4_ext_drop_refs(path1);
5691                 kfree(path1);
5692                 ext4_ext_drop_refs(path2);
5693                 kfree(path2);
5694                 path1 = path2 = NULL;
5695         }
5696         return replaced_count;
5697 }
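/*
 * In this tree, ext4_swap_extents() is driven by the online defrag path
 * (ext4_move_extents(), reached via the EXT4_IOC_MOVE_EXT ioctl used by
 * e4defrag).  A minimal userspace sketch follows; struct move_extent is
 * declared locally (as e4defrag does) since it is not exported through
 * the uapi headers, and the file names, block count, and the assumption
 * that a suitable preallocated donor file exists are illustrative only.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>

struct move_extent {
	__u32 reserved;		/* must be zero */
	__u32 donor_fd;		/* donor file descriptor */
	__u64 orig_start;	/* logical start of the original file, in blocks */
	__u64 donor_start;	/* logical start of the donor file, in blocks */
	__u64 len;		/* number of blocks to move */
	__u64 moved_len;	/* filled in by the kernel */
};

#define EXT4_IOC_MOVE_EXT	_IOWR('f', 15, struct move_extent)

int main(void)
{
	struct move_extent me = { 0 };
	int orig = open("frag.dat", O_RDWR);
	int donor = open("donor.dat", O_RDWR);

	if (orig < 0 || donor < 0)
		return 1;
	me.donor_fd = donor;
	me.orig_start = 0;
	me.donor_start = 0;
	me.len = 1024;		/* swap the first 1024 blocks */
	if (ioctl(orig, EXT4_IOC_MOVE_EXT, &me) < 0)
		perror("EXT4_IOC_MOVE_EXT");
	else
		printf("moved %llu blocks\n",
		       (unsigned long long)me.moved_len);
	close(orig);
	close(donor);
	return 0;
}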