/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 */
/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */
#include <linux/module.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
/*
 * combine low and high parts of physical block number into ext4_fsblk_t
 */
ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
	block = le32_to_cpu(ex->ee_start_lo);
	block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1;
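	/*
	 * Editorial note: ee_start_hi carries the top 16 bits of the
	 * 48-bit physical block number.  For example, hi = 0x0001 with
	 * lo = 0x00000000 yields block 0x100000000 (2^32); the two-step
	 * shift ((x << 31) << 1) is equivalent to x << 32 on the 64-bit
	 * ext4_fsblk_t type.
	 */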
/*
 * combine low and high parts of a leaf physical block number into ext4_fsblk_t
 */
ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
	block = le32_to_cpu(ix->ei_leaf_lo);
	block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1;
/*
 * ext4_ext_store_pblock:
 * stores a large physical block number into an extent struct,
 * breaking it into parts
 */
void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
	ex->ee_start_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);

/*
 * ext4_idx_store_pblock:
 * stores a large physical block number into an index struct,
 * breaking it into parts
 */
static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
	ix->ei_leaf_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
static int ext4_ext_truncate_extend_restart(handle_t *handle,
	if (!ext4_handle_valid(handle))
	if (handle->h_buffer_credits > needed)
	err = ext4_journal_extend(handle, needed);
	err = ext4_truncate_restart_trans(handle, inode, needed);
	/*
	 * We have dropped i_data_sem so someone might have cached again
	 * an extent we are going to truncate.
	 */
	ext4_ext_invalidate_cache(inode);
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
			       struct ext4_ext_path *path)
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */

static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
			  struct ext4_ext_path *path)
		/* path points to block */
		err = ext4_handle_dirty_metadata(handle, inode, path->p_bh);
	/* path points to leaf/index in inode body */
	err = ext4_mark_inode_dirty(handle, inode);
static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
				       struct ext4_ext_path *path,
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;
	ext4_group_t block_group;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
		struct ext4_extent *ex;
		depth = path->p_depth;

		/* try to predict block placement */
		ex = path[depth].p_ext;
			return ext_pblock(ex) + (block - le32_to_cpu(ex->ee_block));

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;

	/* OK. use inode's group */
	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour + block;
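/*
 * Editorial note on the allocator "colour" above: with 16 slices per
 * block group, a process with pid 42 lands in slice 42 % 16 = 10, i.e.
 * an offset of 10 * (EXT4_BLOCKS_PER_GROUP / 16) blocks into the group,
 * which spreads concurrent allocators away from each other.
 */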
/*
 * Allocation for a meta data block
 */
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err)
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, NULL, err);
static inline int ext4_ext_space_block(struct inode *inode, int check)
	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST

static inline int ext4_ext_space_root(struct inode *inode, int check)
	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks
 * Worst case is one block per extent
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, sector_t lblock)
	struct ext4_inode_info *ei = EXT4_I(inode);

	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
		/ sizeof(struct ext4_extent_idx));

	/*
	 * If the new delayed allocation block is contiguous with the
	 * previous da block, it can share index blocks with the
	 * previous block, so we only need to allocate a new index
	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
	 * an additional index block, and at idxs**3 blocks, yet
	 * another index block.
	 */
	if (ei->i_da_metadata_calc_len &&
	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
		if ((ei->i_da_metadata_calc_len % idxs) == 0)
		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
			ei->i_da_metadata_calc_len = 0;
		ei->i_da_metadata_calc_len++;
		ei->i_da_metadata_calc_last_lblock++;

	/*
	 * In the worst case we need a new set of index blocks at
	 * every level of the inode's extent tree.
	 */
	ei->i_da_metadata_calc_len = 1;
	ei->i_da_metadata_calc_last_lblock = lblock;
	return ext_depth(inode) + 1;
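/*
 * Illustrative arithmetic (editorial): with a 4 KiB block size,
 * idxs = (4096 - 12) / 12 = 340 above, so a run of contiguous delayed
 * allocation blocks costs one new index block roughly every 340 leaf
 * blocks, a second-level index every 340^2 blocks, and so on.
 */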
ext4_ext_max_entries(struct inode *inode, int depth)
	if (depth == ext_depth(inode)) {
			max = ext4_ext_space_root(inode, 1);
			max = ext4_ext_space_root_idx(inode, 1);
			max = ext4_ext_space_block(inode, 1);
			max = ext4_ext_space_block_idx(inode, 1);
static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
	ext4_fsblk_t block = ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);

static int ext4_valid_extent_idx(struct inode *inode,
				 struct ext4_extent_idx *ext_idx)
	ext4_fsblk_t block = idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);

static int ext4_valid_extent_entries(struct inode *inode,
				     struct ext4_extent_header *eh,
	struct ext4_extent *ext;
	struct ext4_extent_idx *ext_idx;
	unsigned short entries;
	if (eh->eh_entries == 0)
	entries = le16_to_cpu(eh->eh_entries);
		ext = EXT_FIRST_EXTENT(eh);
			if (!ext4_valid_extent(inode, ext))
		ext_idx = EXT_FIRST_INDEX(eh);
			if (!ext4_valid_extent_idx(inode, ext_idx))
static int __ext4_ext_check(const char *function, struct inode *inode,
			    struct ext4_extent_header *eh,
	const char *error_msg;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";

	__ext4_error(inode->i_sb, function,
		     "bad header/extent in inode #%lu: %s - magic %x, "
		     "entries %u, max %u(%u), depth %u(%u)",
		     inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
		     le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
		     max, le16_to_cpu(eh->eh_depth), depth);

#define ext4_ext_check(inode, eh, depth)	\
	__ext4_ext_check(__func__, inode, eh, depth)
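/*
 * Editorial note: the wrapper macro above passes __func__ so that a
 * corrupted header is reported against the function that detected it.
 */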
int ext4_ext_check_inode(struct inode *inode)
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));

static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
	int k, l = path->p_depth;

	for (k = 0; k <= l; k++, path++) {
			ext_debug(" %d->%llu", le32_to_cpu(path->p_idx->ei_block),
				  idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug(" %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_uninitialized(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext_pblock(path->p_ext));

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex), ext_pblock(ex));

#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)

void ext4_ext_drop_refs(struct ext4_ext_path *path)
	int depth = path->p_depth;

	for (i = 0; i <= depth; i++, path++)
/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
ext4_ext_binsearch_idx(struct inode *inode,
		       struct ext4_ext_path *path, ext4_lblk_t block)
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	ext_debug("binsearch for %u(idx): ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
		if (block < le32_to_cpu(m->ei_block))

	ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
		  m, le32_to_cpu(m->ei_block),
		  r, le32_to_cpu(r->ei_block));
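	/*
	 * Editorial note: at loop exit l == r + 1, so l - 1 is the last
	 * index entry with ei_block <= block; it becomes path->p_idx.
	 */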
	ext_debug(" -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	struct ext4_extent_idx *chix, *ix;

	chix = ix = EXT_FIRST_INDEX(eh);
	for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
		if (k != 0 &&
		    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
			printk(KERN_DEBUG "k=%d, ix=0x%p, "
			       ix, EXT_FIRST_INDEX(eh));
			printk(KERN_DEBUG "%u <= %u\n",
			       le32_to_cpu(ix->ei_block),
			       le32_to_cpu(ix[-1].ei_block));
		BUG_ON(k && le32_to_cpu(ix->ei_block)
			   <= le32_to_cpu(ix[-1].ei_block));
		if (block < le32_to_cpu(ix->ei_block))
	BUG_ON(chix != path->p_idx);
/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
ext4_ext_binsearch(struct inode *inode,
		   struct ext4_ext_path *path, ext4_lblk_t block)
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */

	ext_debug("binsearch for %u: ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);
		if (block < le32_to_cpu(m->ee_block))

	ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
		  m, le32_to_cpu(m->ee_block),
		  r, le32_to_cpu(r->ee_block));
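	/*
	 * Editorial note: as in the index search above, l - 1 is the
	 * closest extent with ee_block <= block; it becomes path->p_ext.
	 */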
	ext_debug(" -> %d:%llu:[%d]%d ",
		  le32_to_cpu(path->p_ext->ee_block),
		  ext_pblock(path->p_ext),
		  ext4_ext_is_uninitialized(path->p_ext),
		  ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	struct ext4_extent *chex, *ex;

	chex = ex = EXT_FIRST_EXTENT(eh);
	for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
		BUG_ON(k && le32_to_cpu(ex->ee_block)
			  <= le32_to_cpu(ex[-1].ee_block));
		if (block < le32_to_cpu(ex->ee_block))
	BUG_ON(chex != path->p_ext);
int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);
struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
		     struct ext4_ext_path *path)
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);

	/* account possible depth increase */
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
			return ERR_PTR(-ENOMEM);

	/* walk through the tree */
		int need_to_validate = 0;

		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
			/* validate the extent entries */
			need_to_validate = 1;
		eh = ext_block_hdr(bh);
		if (unlikely(ppos > depth)) {
			EXT4_ERROR_INODE(inode,
					 "ppos %d > depth %d", ppos, depth);
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		if (need_to_validate && ext4_ext_check(inode, eh, i))

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	ext4_ext_drop_refs(path);
	return ERR_PTR(-EIO);
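/*
 * Editorial sketch of the caller contract for ext4_ext_find_extent(),
 * mirroring the callers elsewhere in this file: the returned path must
 * be released with ext4_ext_drop_refs() and then kfree()d.
 *
 *	path = ext4_ext_find_extent(inode, lblk, NULL);
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	...
 *	ext4_ext_drop_refs(path);
 *	kfree(path);
 */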
/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
			  struct ext4_ext_path *curp,
			  int logical, ext4_fsblk_t ptr)
	struct ext4_extent_idx *ix;

	err = ext4_ext_get_access(handle, inode, curp);
	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
	len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
			len = (len - 1) * sizeof(struct ext4_extent_idx);
			len = len < 0 ? 0 : len;
			ext_debug("insert new index %d after: %llu. "
				  "move %d from 0x%p to 0x%p\n",
				  (curp->p_idx + 1), (curp->p_idx + 2));
			memmove(curp->p_idx + 2, curp->p_idx + 1, len);
		ix = curp->p_idx + 1;
		len = len * sizeof(struct ext4_extent_idx);
		len = len < 0 ? 0 : len;
		ext_debug("insert new index %d before: %llu. "
			  "move %d from 0x%p to 0x%p\n",
			  curp->p_idx, (curp->p_idx + 1));
		memmove(curp->p_idx + 1, curp->p_idx, len);

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
		     > le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d > eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);
/*
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct ext4_extent *ex;
	ext4_fsblk_t newblock, oldblock;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
			  " next leaf starts at %d\n",
			  le32_to_cpu(border));
		border = newext->ee_block;
		ext_debug("leaf will be added."
			  " next leaf starts at %d\n",
			  le32_to_cpu(border));

	/*
	 * If an error occurs, we break processing
	 * and mark the filesystem read-only.  The index won't
	 * be inserted and the tree will be in a consistent
	 * state.  The next mount will repair buffers too.
	 */

	/*
	 * Get an array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
		ablocks[a] = newblock;

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
	bh = sb_getblk(inode->i_sb, newblock);

	err = ext4_journal_get_create_access(handle, bh);

	neh = ext_block_hdr(bh);
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	ex = EXT_FIRST_EXTENT(neh);

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
	/* start copy from next extent */
	/* TODO: we could do it with a single memmove */
	while (path[depth].p_ext <=
			EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
			  le32_to_cpu(path[depth].p_ext->ee_block),
			  ext_pblock(path[depth].p_ext),
			  ext4_ext_is_uninitialized(path[depth].p_ext),
			  ext4_ext_get_actual_len(path[depth].p_ext),
		/*memmove(ex++, path[depth].p_ext++,
				sizeof(struct ext4_extent));
		memmove(ex, path[depth].p_ext - m, sizeof(struct ext4_extent) * m);
		le16_add_cpu(&neh->eh_entries, m);

	set_buffer_uptodate(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);

	/* correct old leaf */
		err = ext4_ext_get_access(handle, inode, path + depth);
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);

	/* create intermediate indexes */
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
	ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);

		err = ext4_journal_get_create_access(handle, bh);

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
			  i, newblock, le32_to_cpu(border), oldblock);

		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
			  EXT_MAX_INDEX(path[i].p_hdr));
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
			     EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
		while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", i,
				  le32_to_cpu(path[i].p_idx->ei_block),
				  idx_pblock(path[i].p_idx),
			/*memmove(++fidx, path[i].p_idx++,
					sizeof(struct ext4_extent_idx));
			BUG_ON(neh->eh_entries > neh->eh_max);*/
		memmove(++fidx, path[i].p_idx - m,
			sizeof(struct ext4_extent_idx) * m);
		le16_add_cpu(&neh->eh_entries, m);

		set_buffer_uptodate(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);

		/* correct old index */
			err = ext4_ext_get_access(handle, inode, path + i);
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

	if (buffer_locked(bh))

	/* free all allocated blocks in error case */
	for (i = 0; i < depth; i++) {
		ext4_free_blocks(handle, inode, 0, ablocks[i], 1,
				 EXT4_FREE_BLOCKS_METADATA);
/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *path,
				 struct ext4_extent *newext)
	struct ext4_ext_path *curp = path;
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;

	newblock = ext4_ext_new_meta_block(handle, inode, path, newext, &err);

	bh = sb_getblk(inode->i_sb, newblock);
		ext4_std_error(inode->i_sb, err);

	err = ext4_journal_get_create_access(handle, bh);

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max the right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	set_buffer_uptodate(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);

	/* create index in new top-level index: num,max,pointer */
	err = ext4_ext_get_access(handle, inode, curp);

	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
	curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
	curp->p_hdr->eh_entries = cpu_to_le16(1);
	curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);

	if (path[0].p_hdr->eh_depth)
		curp->p_idx->ei_block =
			EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
	else
		curp->p_idx->ei_block =
			EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
	ext4_idx_store_pblock(curp->p_idx, newblock);

	neh = ext_inode_hdr(inode);
	fidx = EXT_FIRST_INDEX(neh);
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(fidx->ei_block), idx_pblock(fidx));

	neh->eh_depth = cpu_to_le16(path->p_depth + 1);
	err = ext4_ext_dirty(handle, inode, curp);
/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests growing the tree in depth.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
				    struct ext4_ext_path *path,
				    struct ext4_extent *newext)
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

	i = depth = ext_depth(inode);

	/* walk up the tree looking for a free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, path, newext, i);

		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
			err = PTR_ERR(path);
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, path, newext);

		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
			err = PTR_ERR(path);

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path,
		     ext4_lblk_t *logical, ext4_fsblk_t *phys)
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)

	/* usually the extent in the path covers blocks smaller
	 * than *logical, but it can be that the extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
					 *logical, le32_to_cpu(ex->ee_block));
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
						 "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
						 ix != NULL ? ix->ei_block : 0,
						 EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
						 EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block : 0,

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext_pblock(ex) + ee_len - 1;
/*
 * search the closest allocated block to the right for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the largest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
		      ext4_lblk_t *logical, ext4_fsblk_t *phys)
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth;	/* Note, NOT eh_depth; depth from top of tree */

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)

	/* usually the extent in the path covers blocks smaller
	 * than *logical, but it can be that the extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "first_extent(path[%d].p_hdr) != ex",
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
						 "ix != EXT_FIRST_INDEX *logical %d!",
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext_pblock(ex);

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext_pblock(ex);

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))

	/* we've gone up to the root and found no index to the right */

	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	block = idx_pblock(ix);
	while (++depth < path->p_depth) {
		bh = sb_bread(inode->i_sb, block);
		eh = ext_block_hdr(bh);
		/* subtract from p_depth to get proper eh_depth */
		if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
		ix = EXT_FIRST_INDEX(eh);
		block = idx_pblock(ix);

	bh = sb_bread(inode->i_sb, block);
	eh = ext_block_hdr(bh);
	if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
	ex = EXT_FIRST_EXTENT(eh);
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext_pblock(ex);
/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
 * NOTE: it considers the block number from an index entry as an
 * allocated block.  Thus, index entries have to be consistent
 * with the leaves.
 */
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCK;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			/* leaf */
			if (path[depth].p_ext !=
					EXT_LAST_EXTENT(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_ext[1].ee_block);
			/* index */
			if (path[depth].p_idx !=
					EXT_LAST_INDEX(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_idx[1].ei_block);

	return EXT_MAX_BLOCK;
/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCK
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
					    struct ext4_ext_path *path)
	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* a zero-depth tree has no leaf blocks at all */
		return EXT_MAX_BLOCK;

	/* go to index block */

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);

	return EXT_MAX_BLOCK;
/*
 * ext4_ext_correct_indexes:
 * if the leaf gets modified and the modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct the tree in all cases?
 */
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				    struct ext4_ext_path *path)
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	if (unlikely(ex == NULL || eh == NULL)) {
		EXT4_ERROR_INODE(inode,
				 "ex %p == NULL or eh %p == NULL", ex, eh);

		/* there is no tree at all */

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);

	/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
		err = ext4_ext_get_access(handle, inode, path + k);
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
			   struct ext4_extent *ex2)
	unsigned short ext1_ee_len, ext2_ee_len, max_len;

	/*
	 * Make sure that either both extents are uninitialized, or
	 * both are not.
	 */
	if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))

	if (ext4_ext_is_uninitialized(ex1))
		max_len = EXT_UNINIT_MAX_LEN;
	else
		max_len = EXT_INIT_MAX_LEN;

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
			le32_to_cpu(ex2->ee_block))

	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	 */
	if (ext1_ee_len + ext2_ee_len > max_len)
#ifdef AGGRESSIVE_TEST
	if (ext1_ee_len >= 4)

	if (ext_pblock(ex1) + ext1_ee_len == ext_pblock(ex2))
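	/*
	 * Editorial example: an extent covering logical blocks [100, 108)
	 * at physical block 5000 and one covering [108, 116) at physical
	 * block 5008 are contiguous in both spaces, so the tests above
	 * succeed and the two may be merged into [100, 116) at 5000.
	 */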
/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards right. If you want to merge towards
 * left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
int ext4_ext_try_to_merge(struct inode *inode,
			  struct ext4_ext_path *path,
			  struct ext4_extent *ex)
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int uninitialized = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
		/* merge with next extent! */
		if (ext4_ext_is_uninitialized(ex))
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
			ext4_ext_mark_uninitialized(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		le16_add_cpu(&eh->eh_entries, -1);

		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			ext4_error(inode->i_sb,
				   "inode#%lu, eh->eh_entries = 0!",
/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
unsigned int ext4_ext_check_overlap(struct inode *inode,
				    struct ext4_extent *newext,
				    struct ext4_ext_path *path)
	unsigned int depth, len1;
	unsigned int ret = 0;

	b1 = le32_to_cpu(newext->ee_block);
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
	b2 = le32_to_cpu(path[depth].p_ext->ee_block);

	/*
	 * get the next allocated block if the extent in the path
	 * is before the requested block(s)
	 */
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCK)

	/* check for wrap through zero on extent logical start block*/
	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCK - b1;
		newext->ee_len = cpu_to_le16(len1);

	/* check for overlap */
	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
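	/*
	 * Worked example (editorial): if newext starts at b1 = 100 with
	 * len1 = 50 while an existing extent starts at b2 = 120, then
	 * b1 + len1 > b2 and ee_len is trimmed to b2 - b1 = 20 blocks,
	 * after which an overlap is reported.
	 */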
/*
 * ext4_ext_insert_extent:
 * tries to merge the requested extent into the existing extent or
 * inserts the requested extent as a new one into the tree,
 * creating a new leaf in the no-space case.
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path *path,
			   struct ext4_extent *newext, int flag)
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err;
	unsigned uninitialized = 0;

	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);

	/* try to insert block into found extent and return */
	if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
		&& ext4_can_extents_be_merged(inode, ex, newext)) {
		ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n",
			  ext4_ext_is_uninitialized(newext),
			  ext4_ext_get_actual_len(newext),
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex), ext_pblock(ex));
		err = ext4_ext_get_access(handle, inode, path + depth);

		/*
		 * ext4_can_extents_be_merged should have checked that either
		 * both extents are uninitialized, or both aren't. Thus we
		 * need to check only one of them here.
		 */
		if (ext4_ext_is_uninitialized(ex))
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
			ext4_ext_mark_uninitialized(ex);
		eh = path[depth].p_hdr;

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))

	/* probably next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = ext4_ext_next_leaf_block(inode, path);
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
	    && next != EXT_MAX_BLOCK) {
		ext_debug("next leaf block - %d\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_ext_find_extent(inode, next, NULL);
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isn't full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
		ext_debug("next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

	/*
	 * There is no free space in the found leaf.
	 * We're gonna add a new leaf in the tree.
	 */
	err = ext4_ext_create_new_leaf(handle, inode, path, newext);
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

	nearex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);

		/* there is no extent in this leaf, create first one */
		ext_debug("first extent in the leaf: %d:%llu:[%d]%d\n",
			  le32_to_cpu(newext->ee_block),
			  ext4_ext_is_uninitialized(newext),
			  ext4_ext_get_actual_len(newext));
		path[depth].p_ext = EXT_FIRST_EXTENT(eh);
	} else if (le32_to_cpu(newext->ee_block)
		   > le32_to_cpu(nearex->ee_block)) {
		/* BUG_ON(newext->ee_block == nearex->ee_block); */
		if (nearex != EXT_LAST_EXTENT(eh)) {
			len = EXT_MAX_EXTENT(eh) - nearex;
			len = (len - 1) * sizeof(struct ext4_extent);
			len = len < 0 ? 0 : len;
			ext_debug("insert %d:%llu:[%d]%d after: nearest 0x%p, "
				  "move %d from 0x%p to 0x%p\n",
				  le32_to_cpu(newext->ee_block),
				  ext4_ext_is_uninitialized(newext),
				  ext4_ext_get_actual_len(newext),
				  nearex, len, nearex + 1, nearex + 2);
			memmove(nearex + 2, nearex + 1, len);
		path[depth].p_ext = nearex + 1;
		BUG_ON(newext->ee_block == nearex->ee_block);
		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
		len = len < 0 ? 0 : len;
		ext_debug("insert %d:%llu:[%d]%d before: nearest 0x%p, "
			  "move %d from 0x%p to 0x%p\n",
			  le32_to_cpu(newext->ee_block),
			  ext4_ext_is_uninitialized(newext),
			  ext4_ext_get_actual_len(newext),
			  nearex, len, nearex + 1, nearex + 2);
		memmove(nearex + 1, nearex, len);
		path[depth].p_ext = nearex;

	le16_add_cpu(&eh->eh_entries, 1);
	nearex = path[depth].p_ext;
	nearex->ee_block = newext->ee_block;
	ext4_ext_store_pblock(nearex, ext_pblock(newext));
	nearex->ee_len = newext->ee_len;

	/* try to merge extents to the right */
	if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
		ext4_ext_try_to_merge(inode, path, nearex);

	/* try to merge extents to the left */

	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);

	err = ext4_ext_dirty(handle, inode, path + depth);

	ext4_ext_drop_refs(npath);
	ext4_ext_invalidate_cache(inode);
int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
			ext4_lblk_t num, ext_prepare_callback func,
	struct ext4_ext_path *path = NULL;
	struct ext4_ext_cache cbex;
	struct ext4_extent *ex;
	ext4_lblk_t next, start = 0, end = 0;
	ext4_lblk_t last = block + num;
	int depth, exists, err = 0;

	BUG_ON(func == NULL);
	BUG_ON(inode == NULL);

	while (block < last && block != EXT_MAX_BLOCK) {
		/* find extent for this block */
		down_read(&EXT4_I(inode)->i_data_sem);
		path = ext4_ext_find_extent(inode, block, path);
		up_read(&EXT4_I(inode)->i_data_sem);
			err = PTR_ERR(path);

		depth = ext_depth(inode);
		if (unlikely(path[depth].p_hdr == NULL)) {
			EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		ex = path[depth].p_ext;
		next = ext4_ext_next_allocated_block(path);

			/* there is no extent yet, so try to allocate
			 * all requested space */
		} else if (le32_to_cpu(ex->ee_block) > block) {
			/* need to allocate space before found extent */
			end = le32_to_cpu(ex->ee_block);
			if (block + num < end)
		} else if (block >= le32_to_cpu(ex->ee_block)
			   + ext4_ext_get_actual_len(ex)) {
			/* need to allocate space after found extent */
		} else if (block >= le32_to_cpu(ex->ee_block)) {
			/*
			 * some part of requested space is covered
			 * by found extent
			 */
			end = le32_to_cpu(ex->ee_block)
				+ ext4_ext_get_actual_len(ex);
			if (block + num < end)

		BUG_ON(end <= start);

			cbex.ec_block = start;
			cbex.ec_len = end - start;
			cbex.ec_type = EXT4_EXT_CACHE_GAP;
			cbex.ec_block = le32_to_cpu(ex->ee_block);
			cbex.ec_len = ext4_ext_get_actual_len(ex);
			cbex.ec_start = ext_pblock(ex);
			cbex.ec_type = EXT4_EXT_CACHE_EXTENT;

		if (unlikely(cbex.ec_len == 0)) {
			EXT4_ERROR_INODE(inode, "cbex.ec_len == 0");
		err = func(inode, path, &cbex, ex, cbdata);
		ext4_ext_drop_refs(path);

		if (err == EXT_REPEAT)
		else if (err == EXT_BREAK) {

		if (ext_depth(inode) != depth) {
			/* depth was changed. we have to realloc path */

		block = cbex.ec_block + cbex.ec_len;

	ext4_ext_drop_refs(path);
ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
		      __u32 len, ext4_fsblk_t start, int type)
	struct ext4_ext_cache *cex;

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	cex = &EXT4_I(inode)->i_cached_extent;
	cex->ec_type = type;
	cex->ec_block = block;
	cex->ec_start = start;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
/*
 * ext4_ext_put_gap_in_cache:
 * calculate boundaries of the gap that the requested block fits into
 * and cache this gap
 */
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	ex = path[depth].p_ext;
		/* there is no extent yet, so gap is [0;-] */
		len = EXT_MAX_BLOCK;
		ext_debug("cache gap(whole file):");
	} else if (block < le32_to_cpu(ex->ee_block)) {
		len = le32_to_cpu(ex->ee_block) - block;
		ext_debug("cache gap(before): %u [%u:%u]",
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_get_actual_len(ex));
	} else if (block >= le32_to_cpu(ex->ee_block)
		   + ext4_ext_get_actual_len(ex)) {
		lblock = le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex);
		next = ext4_ext_next_allocated_block(path);
		ext_debug("cache gap(after): [%u:%u] %u",
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_get_actual_len(ex),
		BUG_ON(next == lblock);
		len = next - lblock;

	ext_debug(" -> %u:%lu\n", lblock, len);
	ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
		  struct ext4_extent *ex)
	struct ext4_ext_cache *cex;
	int ret = EXT4_EXT_CACHE_NO;

	/*
	 * We borrow i_block_reservation_lock to protect i_cached_extent
	 */
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	cex = &EXT4_I(inode)->i_cached_extent;

	/* has cache valid data? */
	if (cex->ec_type == EXT4_EXT_CACHE_NO)

	BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
	       cex->ec_type != EXT4_EXT_CACHE_EXTENT);
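	/* editorial note: on a hit below, the cached extent or gap is
	 * copied into *ex for the caller */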
	if (in_range(block, cex->ec_block, cex->ec_len)) {
		ex->ee_block = cpu_to_le32(cex->ec_block);
		ext4_ext_store_pblock(ex, cex->ec_start);
		ex->ee_len = cpu_to_le16(cex->ec_len);
		ext_debug("%u cached by %u:%u:%llu\n",
			  cex->ec_block, cex->ec_len, cex->ec_start);
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
/*
 * ext4_ext_rm_idx:
 * removes index from the index block.
 * It's used in truncate case only, thus all requests are for
 * the last index in the block only.
 */
static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path *path)
	/* free index block */
	leaf = idx_pblock(path->p_idx);
	if (unlikely(path->p_hdr->eh_entries == 0)) {
		EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
	err = ext4_ext_get_access(handle, inode, path);
	le16_add_cpu(&path->p_hdr->eh_entries, -1);
	err = ext4_ext_dirty(handle, inode, path);
	ext_debug("index is empty, remove it, free block %llu\n", leaf);
	ext4_free_blocks(handle, inode, 0, leaf, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
/*
 * ext4_ext_calc_credits_for_single_extent:
 * This routine returns the max. credits needed to insert an extent
 * into the extent tree.
 * When passing the actual path, the caller should calculate credits
 * under i_data_sem.
 */
int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
					    struct ext4_ext_path *path)
	int depth = ext_depth(inode);

	/* probably there is space in the leaf? */
	if (le16_to_cpu(path[depth].p_hdr->eh_entries)
	    < le16_to_cpu(path[depth].p_hdr->eh_max)) {
		/*
		 * There is some space in the leaf tree, no
		 * need to account for leaf block credit
		 *
		 * bitmaps and block group descriptor blocks
		 * and other metadata blocks still need to be
		 * accounted.
		 */
		/* 1 bitmap, 1 block group descriptor */
		ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);

	return ext4_chunk_trans_blocks(inode, nrblocks);
/*
 * How many index/leaf blocks need to change/allocate to modify nrblocks?
 *
 * If nrblocks fit in a single extent (chunk flag is 1), then
 * in the worst case each tree level's index/leaf needs to be changed;
 * if the tree splits due to inserting a new extent, then the old tree's
 * index/leaf need to be updated too.
 *
 * If the nrblocks are discontiguous, they could cause
 * the whole tree to split more than once, but this is really rare.
 */
int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
	int depth = ext_depth(inode);
static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
			      struct ext4_extent *ex,
			      ext4_lblk_t from, ext4_lblk_t to)
	unsigned short ee_len = ext4_ext_get_actual_len(ex);
	int flags = EXT4_FREE_BLOCKS_FORGET;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		flags |= EXT4_FREE_BLOCKS_METADATA;
#ifdef EXTENTS_STATS
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	spin_lock(&sbi->s_ext_stats_lock);
	sbi->s_ext_blocks += ee_len;
	sbi->s_ext_extents++;
	if (ee_len < sbi->s_ext_min)
		sbi->s_ext_min = ee_len;
	if (ee_len > sbi->s_ext_max)
		sbi->s_ext_max = ee_len;
	if (ext_depth(inode) > sbi->s_depth_max)
		sbi->s_depth_max = ext_depth(inode);
	spin_unlock(&sbi->s_ext_stats_lock);

	if (from >= le32_to_cpu(ex->ee_block)
	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
		/* tail removal */
		num = le32_to_cpu(ex->ee_block) + ee_len - from;
		start = ext_pblock(ex) + ee_len - num;
		ext_debug("free last %u blocks starting %llu\n", num, start);
		ext4_free_blocks(handle, inode, 0, start, num, flags);
	} else if (from == le32_to_cpu(ex->ee_block)
		   && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
		printk(KERN_INFO "strange request: removal %u-%u from %u:%u\n",
		       from, to, le32_to_cpu(ex->ee_block), ee_len);
		printk(KERN_INFO "strange request: removal(2) "
		       "%u-%u from %u:%u\n",
		       from, to, le32_to_cpu(ex->ee_block), ee_len);
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
		 struct ext4_ext_path *path, ext4_lblk_t start)
	int err = 0, correct_index = 0;
	int depth = ext_depth(inode), credits;
	struct ext4_extent_header *eh;
	ext4_lblk_t a, b, block;
	ext4_lblk_t ex_ee_block;
	unsigned short ex_ee_len;
	unsigned uninitialized = 0;
	struct ext4_extent *ex;

	/* the header must be checked already in ext4_ext_remove_space() */
	ext_debug("truncate since %u in leaf\n", start);
	if (!path[depth].p_hdr)
		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
	eh = path[depth].p_hdr;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
	/* find where to start removing */
	ex = EXT_LAST_EXTENT(eh);

	ex_ee_block = le32_to_cpu(ex->ee_block);
	ex_ee_len = ext4_ext_get_actual_len(ex);

	while (ex >= EXT_FIRST_EXTENT(eh) &&
	       ex_ee_block + ex_ee_len > start) {

		if (ext4_ext_is_uninitialized(ex))

		ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
			  uninitialized, ex_ee_len);
		path[depth].p_ext = ex;

		a = ex_ee_block > start ? ex_ee_block : start;
		b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
			ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;

		ext_debug("  border %u:%u\n", a, b);

		if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
		} else if (a != ex_ee_block) {
			/* remove tail of the extent */
			block = ex_ee_block;
		} else if (b != ex_ee_block + ex_ee_len - 1) {
			/* remove head of the extent */
			/* there is no "make a hole" API yet */
			/* remove whole extent: excellent! */
			block = ex_ee_block;
			BUG_ON(a != ex_ee_block);
			BUG_ON(b != ex_ee_block + ex_ee_len - 1);

		/*
		 * 3 for leaf, sb, and inode plus 2 (bmap and group
		 * descriptor) for each block group; assume two block
		 * groups plus ex_ee_len/blocks_per_block_group for
		 * the worst case
		 */
		credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
		if (ex == EXT_FIRST_EXTENT(eh)) {
			credits += (ext_depth(inode)) + 1;
		credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);

		err = ext4_ext_truncate_extend_restart(handle, inode, credits);

		err = ext4_ext_get_access(handle, inode, path + depth);

		err = ext4_remove_blocks(handle, inode, ex, a, b);

			/* this extent is removed; mark slot entirely unused */
			ext4_ext_store_pblock(ex, 0);
			le16_add_cpu(&eh->eh_entries, -1);

		ex->ee_block = cpu_to_le32(block);
		ex->ee_len = cpu_to_le16(num);
		/*
		 * Do not mark uninitialized if all the blocks in the
		 * extent have been removed.
		 */
		if (uninitialized && num)
			ext4_ext_mark_uninitialized(ex);

		err = ext4_ext_dirty(handle, inode, path + depth);

		ext_debug("new extent: %u:%u:%llu\n", block, num,
		ex_ee_block = le32_to_cpu(ex->ee_block);
		ex_ee_len = ext4_ext_get_actual_len(ex);

	if (correct_index && eh->eh_entries)
		err = ext4_ext_correct_indexes(handle, inode, path);

	/* if this leaf is free, then we should
	 * remove it from index block above */
	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
		err = ext4_ext_rm_idx(handle, inode, path + depth);
/*
 * ext4_ext_more_to_rm:
 * returns 1 if current index has to be freed (even partial)
 */
ext4_ext_more_to_rm(struct ext4_ext_path *path)
	BUG_ON(path->p_idx == NULL);

	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))

	/*
	 * if truncate on deeper level happened, it wasn't partial,
	 * so we have to consider current index for truncation
	 */
	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2358 static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
2360 struct super_block *sb = inode->i_sb;
2361 int depth = ext_depth(inode);
2362 struct ext4_ext_path *path;
2366 ext_debug("truncate since %u\n", start);
2368 /* probably first extent we're gonna free will be last in block */
2369 handle = ext4_journal_start(inode, depth + 1);
2371 return PTR_ERR(handle);
2373 ext4_ext_invalidate_cache(inode);
2376 * We start scanning from right side, freeing all the blocks
2377 * after i_size and walking into the tree depth-wise.
2379 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
2381 ext4_journal_stop(handle);
2384 path[0].p_hdr = ext_inode_hdr(inode);
2385 if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
2389 path[0].p_depth = depth;
2391 while (i >= 0 && err == 0) {
2393 /* this is leaf block */
2394 err = ext4_ext_rm_leaf(handle, inode, path, start);
2395 /* root level has p_bh == NULL, brelse() eats this */
2396 brelse(path[i].p_bh);
2397 path[i].p_bh = NULL;
2402 /* this is index block */
2403 if (!path[i].p_hdr) {
2404 ext_debug("initialize header\n");
2405 path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2408 if (!path[i].p_idx) {
2409 /* this level hasn't been touched yet */
2410 path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2411 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2412 ext_debug("init index ptr: hdr 0x%p, num %d\n",
2414 le16_to_cpu(path[i].p_hdr->eh_entries));
2416 /* we were already here, see at next index */
2420 ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
2421 i, EXT_FIRST_INDEX(path[i].p_hdr),
2423 if (ext4_ext_more_to_rm(path + i)) {
2424 struct buffer_head *bh;
2425 /* go to the next level */
2426 ext_debug("move to level %d (block %llu)\n",
2427 i + 1, idx_pblock(path[i].p_idx));
2428 memset(path + i + 1, 0, sizeof(*path));
2429 bh = sb_bread(sb, idx_pblock(path[i].p_idx));
2431 /* should we reset i_size? */
2435 if (WARN_ON(i + 1 > depth)) {
2439 if (ext4_ext_check(inode, ext_block_hdr(bh),
2444 path[i + 1].p_bh = bh;
2446 /* save actual number of indexes since this
2447 * number is changed at the next iteration */
2448 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
2451 /* we finished processing this index, go up */
2452 if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2453 /* index is empty, remove it;
2454 * handle must be already prepared by the
2455 * truncatei_leaf() */
2456 err = ext4_ext_rm_idx(handle, inode, path + i);
2458 /* root level has p_bh == NULL, brelse() eats this */
2459 brelse(path[i].p_bh);
2460 path[i].p_bh = NULL;
2462 ext_debug("return to level %d\n", i);
2466 /* TODO: flexible tree reduction should be here */
2467 if (path->p_hdr->eh_entries == 0) {
2469 * truncate to zero freed all the tree,
2470 * so we need to correct eh_depth
2472 err = ext4_ext_get_access(handle, inode, path);
2474 ext_inode_hdr(inode)->eh_depth = 0;
2475 ext_inode_hdr(inode)->eh_max =
2476 cpu_to_le16(ext4_ext_space_root(inode, 0));
2477 err = ext4_ext_dirty(handle, inode, path);
2481 ext4_ext_drop_refs(path);
2483 ext4_journal_stop(handle);
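
/*
 * Illustrative sketch, not part of the original file: the call pattern used
 * by ext4_ext_truncate() later in this file. The first block to remove is
 * i_size rounded up to a block boundary; everything from there to the end
 * of the file is freed. ext4_example_truncate_tail() is hypothetical.
 */
static int ext4_example_truncate_tail(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t last_block = (inode->i_size + sb->s_blocksize - 1)
					>> EXT4_BLOCK_SIZE_BITS(sb);

	/* removes every extent from last_block to the end of the file */
	return ext4_ext_remove_space(inode, last_block);
}
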
2489 * called at mount time
2491 void ext4_ext_init(struct super_block *sb)
2494 * possible initialization would be here
2497 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
2498 #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
2499 printk(KERN_INFO "EXT4-fs: file extents enabled");
2500 #ifdef AGGRESSIVE_TEST
2501 printk(", aggressive tests");
2503 #ifdef CHECK_BINSEARCH
2504 printk(", check binsearch");
2506 #ifdef EXTENTS_STATS
2511 #ifdef EXTENTS_STATS
2512 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
2513 EXT4_SB(sb)->s_ext_min = 1 << 30;
2514 EXT4_SB(sb)->s_ext_max = 0;
2520 * called at umount time
2522 void ext4_ext_release(struct super_block *sb)
2524 if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
2527 #ifdef EXTENTS_STATS
2528 if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
2529 struct ext4_sb_info *sbi = EXT4_SB(sb);
2530 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
2531 sbi->s_ext_blocks, sbi->s_ext_extents,
2532 sbi->s_ext_blocks / sbi->s_ext_extents);
2533 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
2534 sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
2539 static void bi_complete(struct bio *bio, int error)
2541 complete((struct completion *)bio->bi_private);
2544 /* FIXME!! we need to try to merge to left or right after zero-out */
2545 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2549 int blkbits, blocksize;
2551 struct completion event;
2552 unsigned int ee_len, len, done, offset;
2555 blkbits = inode->i_blkbits;
2556 blocksize = inode->i_sb->s_blocksize;
2557 ee_len = ext4_ext_get_actual_len(ex);
2558 ee_pblock = ext_pblock(ex);
2560 /* convert ee_pblock to 512 byte sectors */
2561 ee_pblock = ee_pblock << (blkbits - 9);
2563 while (ee_len > 0) {
2565 if (ee_len > BIO_MAX_PAGES)
2566 len = BIO_MAX_PAGES;
2570 bio = bio_alloc(GFP_NOIO, len);
2571 bio->bi_sector = ee_pblock;
2572 bio->bi_bdev = inode->i_sb->s_bdev;
2576 while (done < len) {
2577 ret = bio_add_page(bio, ZERO_PAGE(0),
2579 if (ret != blocksize) {
2581 * We can't add any more pages because of
2582 * hardware limitations. Start a new bio.
2587 offset += blocksize;
2588 if (offset >= PAGE_CACHE_SIZE)
2592 init_completion(&event);
2593 bio->bi_private = &event;
2594 bio->bi_end_io = bi_complete;
2595 submit_bio(WRITE, bio);
2596 wait_for_completion(&event);
2598 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
2606 ee_pblock += done << (blkbits - 9);
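
/*
 * Illustrative sketch, not part of the original file: the shifts by
 * (blkbits - 9) above convert filesystem blocks to 512-byte sectors
 * (1 << 9 == 512). ext4_example_block_to_sector() is hypothetical.
 */
static inline sector_t ext4_example_block_to_sector(ext4_fsblk_t pblk,
						    unsigned int blkbits)
{
	/* with 4096-byte blocks (blkbits == 12), block N starts at sector N * 8 */
	return (sector_t)pblk << (blkbits - 9);
}
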
2611 #define EXT4_EXT_ZERO_LEN 7
2613 * This function is called by ext4_ext_get_blocks() if someone tries to write
2614 * to an uninitialized extent. It may result in splitting the uninitialized
2615 * extent into multiple extents (up to three: one initialized and two uninitialized).
2617 * There are three possibilities:
2618 * a> There is no split required: Entire extent should be initialized
2619 * b> Splits into two extents: the write happens at either end of the extent
2620 * c> Splits into three extents: someone is writing in the middle of the extent
2622 static int ext4_ext_convert_to_initialized(handle_t *handle,
2623 struct inode *inode,
2624 struct ext4_ext_path *path,
2626 unsigned int max_blocks)
2628 struct ext4_extent *ex, newex, orig_ex;
2629 struct ext4_extent *ex1 = NULL;
2630 struct ext4_extent *ex2 = NULL;
2631 struct ext4_extent *ex3 = NULL;
2632 struct ext4_extent_header *eh;
2633 ext4_lblk_t ee_block;
2634 unsigned int allocated, ee_len, depth;
2635 ext4_fsblk_t newblock;
2639 depth = ext_depth(inode);
2640 eh = path[depth].p_hdr;
2641 ex = path[depth].p_ext;
2642 ee_block = le32_to_cpu(ex->ee_block);
2643 ee_len = ext4_ext_get_actual_len(ex);
2644 allocated = ee_len - (iblock - ee_block);
2645 newblock = iblock - ee_block + ext_pblock(ex);
2647 orig_ex.ee_block = ex->ee_block;
2648 orig_ex.ee_len = cpu_to_le16(ee_len);
2649 ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
2651 err = ext4_ext_get_access(handle, inode, path + depth);
2654 /* If the extent is at most 2*EXT4_EXT_ZERO_LEN blocks, zero it out directly */
2655 if (ee_len <= 2*EXT4_EXT_ZERO_LEN) {
2656 err = ext4_ext_zeroout(inode, &orig_ex);
2658 goto fix_extent_len;
2659 /* update the extent length and mark as initialized */
2660 ex->ee_block = orig_ex.ee_block;
2661 ex->ee_len = orig_ex.ee_len;
2662 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2663 ext4_ext_dirty(handle, inode, path + depth);
2664 /* zeroed the full extent */
2668 /* ex1: ee_block to iblock - 1 : uninitialized */
2669 if (iblock > ee_block) {
2671 ex1->ee_len = cpu_to_le16(iblock - ee_block);
2672 ext4_ext_mark_uninitialized(ex1);
2676 * for sanity, update the length of the ex2 extent before
2677 * we insert ex3, if ex1 is NULL. This is to avoid temporary
2678 * overlap of blocks.
2680 if (!ex1 && allocated > max_blocks)
2681 ex2->ee_len = cpu_to_le16(max_blocks);
2682 /* ex3: to ee_block + ee_len : uninitialised */
2683 if (allocated > max_blocks) {
2684 unsigned int newdepth;
2685 /* If the rest of the extent (from iblock on) is at most EXT4_EXT_ZERO_LEN blocks, zero it out directly */
2686 if (allocated <= EXT4_EXT_ZERO_LEN) {
2688 * iblock == ee_block is handled by the zeroout
2690 * Mark first half uninitialized.
2691 * Mark second half initialized and zero out the
2692 * initialized extent
2694 ex->ee_block = orig_ex.ee_block;
2695 ex->ee_len = cpu_to_le16(ee_len - allocated);
2696 ext4_ext_mark_uninitialized(ex);
2697 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2698 ext4_ext_dirty(handle, inode, path + depth);
2701 ex3->ee_block = cpu_to_le32(iblock);
2702 ext4_ext_store_pblock(ex3, newblock);
2703 ex3->ee_len = cpu_to_le16(allocated);
2704 err = ext4_ext_insert_extent(handle, inode, path,
2706 if (err == -ENOSPC) {
2707 err = ext4_ext_zeroout(inode, &orig_ex);
2709 goto fix_extent_len;
2710 ex->ee_block = orig_ex.ee_block;
2711 ex->ee_len = orig_ex.ee_len;
2712 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2713 ext4_ext_dirty(handle, inode, path + depth);
2714 /* blocks available from iblock */
2718 goto fix_extent_len;
2721 * We need to zero out the second half because
2722 * a fallocate request can update the file size and
2723 * converting the second half to an initialized extent
2724 * would leak stale junk data to user space.
2727 err = ext4_ext_zeroout(inode, ex3);
2730 * We should actually mark the
2731 * second half as uninitialized and return the error;
2732 * the insert would have changed the extent layout
2734 depth = ext_depth(inode);
2735 ext4_ext_drop_refs(path);
2736 path = ext4_ext_find_extent(inode,
2739 err = PTR_ERR(path);
2742 /* get the second half extent details */
2743 ex = path[depth].p_ext;
2744 err = ext4_ext_get_access(handle, inode,
2748 ext4_ext_mark_uninitialized(ex);
2749 ext4_ext_dirty(handle, inode, path + depth);
2753 /* zeroed the second half */
2757 ex3->ee_block = cpu_to_le32(iblock + max_blocks);
2758 ext4_ext_store_pblock(ex3, newblock + max_blocks);
2759 ex3->ee_len = cpu_to_le16(allocated - max_blocks);
2760 ext4_ext_mark_uninitialized(ex3);
2761 err = ext4_ext_insert_extent(handle, inode, path, ex3, 0);
2762 if (err == -ENOSPC) {
2763 err = ext4_ext_zeroout(inode, &orig_ex);
2765 goto fix_extent_len;
2766 /* update the extent length and mark as initialized */
2767 ex->ee_block = orig_ex.ee_block;
2768 ex->ee_len = orig_ex.ee_len;
2769 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2770 ext4_ext_dirty(handle, inode, path + depth);
2771 /* zeroed the full extent */
2772 /* blocks available from iblock */
2776 goto fix_extent_len;
2778 * The depth, and hence eh & ex might change
2779 * as part of the insert above.
2781 newdepth = ext_depth(inode);
2783 * update the extent length after successful insert of the
2786 orig_ex.ee_len = cpu_to_le16(ee_len -
2787 ext4_ext_get_actual_len(ex3));
2789 ext4_ext_drop_refs(path);
2790 path = ext4_ext_find_extent(inode, iblock, path);
2792 err = PTR_ERR(path);
2795 eh = path[depth].p_hdr;
2796 ex = path[depth].p_ext;
2800 err = ext4_ext_get_access(handle, inode, path + depth);
2804 allocated = max_blocks;
2806 /* If the extent is at most EXT4_EXT_ZERO_LEN blocks and we are trying
2807 * to insert an extent in the middle, zero it out directly;
2808 * otherwise give the extent a chance to merge to the left
2810 if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
2811 iblock != ee_block) {
2812 err = ext4_ext_zeroout(inode, &orig_ex);
2814 goto fix_extent_len;
2815 /* update the extent length and mark as initialized */
2816 ex->ee_block = orig_ex.ee_block;
2817 ex->ee_len = orig_ex.ee_len;
2818 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2819 ext4_ext_dirty(handle, inode, path + depth);
2820 /* zero out the first half */
2821 /* blocks available from iblock */
2826 * If there was a change of depth as part of the
2827 * insertion of ex3 above, we need to update the length
2828 * of the ex1 extent again here
2830 if (ex1 && ex1 != ex) {
2832 ex1->ee_len = cpu_to_le16(iblock - ee_block);
2833 ext4_ext_mark_uninitialized(ex1);
2836 /* ex2: iblock to iblock + maxblocks-1 : initialised */
2837 ex2->ee_block = cpu_to_le32(iblock);
2838 ext4_ext_store_pblock(ex2, newblock);
2839 ex2->ee_len = cpu_to_le16(allocated);
2843 * New (initialized) extent starts from the first block
2844 * in the current extent, i.e., ex2 == ex.
2845 * We have to see if it can be merged with the extent
2848 if (ex2 > EXT_FIRST_EXTENT(eh)) {
2850 * To merge left, pass "ex2 - 1" to try_to_merge(),
2851 * since it merges towards right _only_.
2853 ret = ext4_ext_try_to_merge(inode, path, ex2 - 1);
2855 err = ext4_ext_correct_indexes(handle, inode, path);
2858 depth = ext_depth(inode);
2863 * Try to merge towards the right. This might be required
2864 * only when the whole extent is being written to.
2865 * i.e. ex2 == ex and ex3 == NULL.
2868 ret = ext4_ext_try_to_merge(inode, path, ex2);
2870 err = ext4_ext_correct_indexes(handle, inode, path);
2875 /* Mark modified extent as dirty */
2876 err = ext4_ext_dirty(handle, inode, path + depth);
2879 err = ext4_ext_insert_extent(handle, inode, path, &newex, 0);
2880 if (err == -ENOSPC) {
2881 err = ext4_ext_zeroout(inode, &orig_ex);
2883 goto fix_extent_len;
2884 /* update the extent length and mark as initialized */
2885 ex->ee_block = orig_ex.ee_block;
2886 ex->ee_len = orig_ex.ee_len;
2887 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2888 ext4_ext_dirty(handle, inode, path + depth);
2889 /* zero out the first half */
2892 goto fix_extent_len;
2894 ext4_ext_show_leaf(inode, path);
2895 return err ? err : allocated;
2898 ex->ee_block = orig_ex.ee_block;
2899 ex->ee_len = orig_ex.ee_len;
2900 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2901 ext4_ext_mark_uninitialized(ex);
2902 ext4_ext_dirty(handle, inode, path + depth);
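
/*
 * Illustrative sketch, not part of the original file: the three-way split
 * geometry computed above, reduced to plain arithmetic. Given an extent
 * covering [ee_block, ee_block + ee_len) and a write of max_blocks blocks
 * at iblock, a zero length below means that piece is not created. Both the
 * struct and the helper are hypothetical.
 */
struct ext4_example_split {
	unsigned int ex1_len;	/* uninitialized head: ee_block .. iblock - 1 */
	unsigned int ex2_len;	/* initialized middle, starting at iblock */
	unsigned int ex3_len;	/* uninitialized tail after the write */
};

static void ext4_example_split_geometry(ext4_lblk_t ee_block,
					unsigned int ee_len,
					ext4_lblk_t iblock,
					unsigned int max_blocks,
					struct ext4_example_split *s)
{
	/* blocks from the write position to the end of the extent */
	unsigned int allocated = ee_len - (iblock - ee_block);

	s->ex1_len = iblock - ee_block;
	s->ex2_len = allocated > max_blocks ? max_blocks : allocated;
	s->ex3_len = allocated > max_blocks ? allocated - max_blocks : 0;
}
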
2907 * This function is called by ext4_ext_get_blocks() from
2908 * ext4_get_blocks_dio_write() when a direct IO write
2909 * targets an uninitialized extent.
2911 * Writing to an uninitialized extent may result in splitting the uninitialized
2912 * extent into multiple initialized/uninitialized extents (up to three).
2913 * There are three possibilities:
2914 * a> There is no split required: Entire extent should be uninitialized
2915 * b> Splits into two extents: the write happens at either end of the extent
2916 * c> Splits into three extents: someone is writing in the middle of the extent
2918 * One or more index blocks may be needed if the extent tree grows after
2919 * the uninitialized extent is split. To prevent ENOSPC from occurring at IO
2920 * completion time, we need to split the uninitialized extent before the DIO
2921 * is submitted. The uninitialized extent passed in will be split
2922 * into (at most) three uninitialized extents. After the IO completes, the part
2923 * being filled will be converted to initialized by the end_io callback
2924 * via ext4_convert_unwritten_extents().
2926 * On success, returns the size of the uninitialized extent to be written.
2928 static int ext4_split_unwritten_extents(handle_t *handle,
2929 struct inode *inode,
2930 struct ext4_ext_path *path,
2932 unsigned int max_blocks,
2935 struct ext4_extent *ex, newex, orig_ex;
2936 struct ext4_extent *ex1 = NULL;
2937 struct ext4_extent *ex2 = NULL;
2938 struct ext4_extent *ex3 = NULL;
2939 struct ext4_extent_header *eh;
2940 ext4_lblk_t ee_block;
2941 unsigned int allocated, ee_len, depth;
2942 ext4_fsblk_t newblock;
2945 ext_debug("ext4_split_unwritten_extents: inode %lu,"
2946 " iblock %llu, max_blocks %u\n", inode->i_ino,
2947 (unsigned long long)iblock, max_blocks);
2948 depth = ext_depth(inode);
2949 eh = path[depth].p_hdr;
2950 ex = path[depth].p_ext;
2951 ee_block = le32_to_cpu(ex->ee_block);
2952 ee_len = ext4_ext_get_actual_len(ex);
2953 allocated = ee_len - (iblock - ee_block);
2954 newblock = iblock - ee_block + ext_pblock(ex);
2956 orig_ex.ee_block = ex->ee_block;
2957 orig_ex.ee_len = cpu_to_le16(ee_len);
2958 ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
2961 * If the uninitialized extent begins at the same logical
2962 * block where the write begins, and the write completely
2963 * covers the extent, then we don't need to split it.
2965 if ((iblock == ee_block) && (allocated <= max_blocks))
2968 err = ext4_ext_get_access(handle, inode, path + depth);
2971 /* ex1: ee_block to iblock - 1 : uninitialized */
2972 if (iblock > ee_block) {
2974 ex1->ee_len = cpu_to_le16(iblock - ee_block);
2975 ext4_ext_mark_uninitialized(ex1);
2979 * for sanity, update the length of the ex2 extent before
2980 * we insert ex3, if ex1 is NULL. This is to avoid temporary
2981 * overlap of blocks.
2983 if (!ex1 && allocated > max_blocks)
2984 ex2->ee_len = cpu_to_le16(max_blocks);
2985 /* ex3: to ee_block + ee_len : uninitialised */
2986 if (allocated > max_blocks) {
2987 unsigned int newdepth;
2989 ex3->ee_block = cpu_to_le32(iblock + max_blocks);
2990 ext4_ext_store_pblock(ex3, newblock + max_blocks);
2991 ex3->ee_len = cpu_to_le16(allocated - max_blocks);
2992 ext4_ext_mark_uninitialized(ex3);
2993 err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
2994 if (err == -ENOSPC) {
2995 err = ext4_ext_zeroout(inode, &orig_ex);
2997 goto fix_extent_len;
2998 /* update the extent length and mark as initialized */
2999 ex->ee_block = orig_ex.ee_block;
3000 ex->ee_len = orig_ex.ee_len;
3001 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
3002 ext4_ext_dirty(handle, inode, path + depth);
3003 /* zeroed the full extent */
3004 /* blocks available from iblock */
3008 goto fix_extent_len;
3010 * The depth, and hence eh & ex might change
3011 * as part of the insert above.
3013 newdepth = ext_depth(inode);
3015 * update the extent length after successful insert of the
3018 orig_ex.ee_len = cpu_to_le16(ee_len -
3019 ext4_ext_get_actual_len(ex3));
3021 ext4_ext_drop_refs(path);
3022 path = ext4_ext_find_extent(inode, iblock, path);
3024 err = PTR_ERR(path);
3027 eh = path[depth].p_hdr;
3028 ex = path[depth].p_ext;
3032 err = ext4_ext_get_access(handle, inode, path + depth);
3036 allocated = max_blocks;
3039 * If there was a change of depth as part of the
3040 * insertion of ex3 above, we need to update the length
3041 * of the ex1 extent again here
3043 if (ex1 && ex1 != ex) {
3045 ex1->ee_len = cpu_to_le16(iblock - ee_block);
3046 ext4_ext_mark_uninitialized(ex1);
3050 * ex2: iblock to iblock + maxblocks-1 : to be written by direct IO,
3051 * still uninitialised.
3053 ex2->ee_block = cpu_to_le32(iblock);
3054 ext4_ext_store_pblock(ex2, newblock);
3055 ex2->ee_len = cpu_to_le16(allocated);
3056 ext4_ext_mark_uninitialized(ex2);
3059 /* Mark modified extent as dirty */
3060 err = ext4_ext_dirty(handle, inode, path + depth);
3061 ext_debug("out here\n");
3064 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
3065 if (err == -ENOSPC) {
3066 err = ext4_ext_zeroout(inode, &orig_ex);
3068 goto fix_extent_len;
3069 /* update the extent length and mark as initialized */
3070 ex->ee_block = orig_ex.ee_block;
3071 ex->ee_len = orig_ex.ee_len;
3072 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
3073 ext4_ext_dirty(handle, inode, path + depth);
3074 /* zero out the first half */
3077 goto fix_extent_len;
3079 ext4_ext_show_leaf(inode, path);
3080 return err ? err : allocated;
3083 ex->ee_block = orig_ex.ee_block;
3084 ex->ee_len = orig_ex.ee_len;
3085 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
3086 ext4_ext_mark_uninitialized(ex);
3087 ext4_ext_dirty(handle, inode, path + depth);
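
/*
 * Illustrative sketch, not part of the original file: the early-return test
 * at the top of ext4_split_unwritten_extents() above, in predicate form; no
 * split is needed when the write starts at the extent start and covers the
 * whole remainder. ext4_example_write_covers_extent() is hypothetical.
 */
static inline int ext4_example_write_covers_extent(ext4_lblk_t iblock,
						   ext4_lblk_t ee_block,
						   unsigned int allocated,
						   unsigned int max_blocks)
{
	return iblock == ee_block && allocated <= max_blocks;
}
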
3090 static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3091 struct inode *inode,
3092 struct ext4_ext_path *path)
3094 struct ext4_extent *ex;
3095 struct ext4_extent_header *eh;
3100 depth = ext_depth(inode);
3101 eh = path[depth].p_hdr;
3102 ex = path[depth].p_ext;
3104 err = ext4_ext_get_access(handle, inode, path + depth);
3107 /* first mark the extent as initialized */
3108 ext4_ext_mark_initialized(ex);
3111 * We have to see if it can be merged with the extent
3114 if (ex > EXT_FIRST_EXTENT(eh)) {
3116 * To merge left, pass "ex - 1" to try_to_merge(),
3117 * since it merges towards right _only_.
3119 ret = ext4_ext_try_to_merge(inode, path, ex - 1);
3121 err = ext4_ext_correct_indexes(handle, inode, path);
3124 depth = ext_depth(inode);
3129 * Try to merge towards the right.
3131 ret = ext4_ext_try_to_merge(inode, path, ex);
3133 err = ext4_ext_correct_indexes(handle, inode, path);
3136 depth = ext_depth(inode);
3138 /* Mark modified extent as dirty */
3139 err = ext4_ext_dirty(handle, inode, path + depth);
3141 ext4_ext_show_leaf(inode, path);
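
/*
 * Illustrative sketch, not part of the original file: because
 * ext4_ext_try_to_merge() only merges an extent with its right neighbour,
 * "merging left" is expressed by passing the left neighbour, exactly as the
 * function above does. This hypothetical helper ignores the index correction
 * and pointer revalidation the real code performs after each merge.
 */
static void ext4_example_merge_both_ways(struct inode *inode,
					 struct ext4_ext_path *path,
					 struct ext4_extent *ex,
					 struct ext4_extent_header *eh)
{
	/* let the left neighbour absorb ex */
	if (ex > EXT_FIRST_EXTENT(eh))
		ext4_ext_try_to_merge(inode, path, ex - 1);
	/* then let ex absorb its right neighbour */
	ext4_ext_try_to_merge(inode, path, ex);
}
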
3145 static void unmap_underlying_metadata_blocks(struct block_device *bdev,
3146 sector_t block, int count)
3149 for (i = 0; i < count; i++)
3150 unmap_underlying_metadata(bdev, block + i);
3154 ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3155 ext4_lblk_t iblock, unsigned int max_blocks,
3156 struct ext4_ext_path *path, int flags,
3157 unsigned int allocated, struct buffer_head *bh_result,
3158 ext4_fsblk_t newblock)
3162 ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
3164 ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical"
3165 "block %llu, max_blocks %u, flags %d, allocated %u",
3166 inode->i_ino, (unsigned long long)iblock, max_blocks,
3168 ext4_ext_show_leaf(inode, path);
3170 /* get_block() before the IO is submitted: split the extent */
3171 if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
3172 ret = ext4_split_unwritten_extents(handle,
3173 inode, path, iblock,
3176 * Flag the inode (non-AIO case) or the end_io struct (AIO case)
3177 * that this IO needs conversion to written when the IO is
3181 io->flag = EXT4_IO_UNWRITTEN;
3183 ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3184 if (ext4_should_dioread_nolock(inode))
3185 set_buffer_uninit(bh_result);
3188 /* at IO completion (end_io), convert the filled extent to written */
3189 if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
3190 ret = ext4_convert_unwritten_extents_endio(handle, inode,
3193 ext4_update_inode_fsync_trans(handle, inode, 1);
3196 /* buffered IO case */
3198 * repeated fallocate creation request:
3199 * we already have an unwritten extent
3201 if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
3204 /* buffered READ or buffered write_begin() lookup */
3205 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3207 * We have blocks reserved already. We
3208 * return allocated blocks so that delalloc
3209 * won't do block reservation for us. But
3210 * the buffer head will be unmapped so that
3211 * a read from the block returns 0s.
3213 set_buffer_unwritten(bh_result);
3217 /* buffered write, writepage time, convert */
3218 ret = ext4_ext_convert_to_initialized(handle, inode,
3222 ext4_update_inode_fsync_trans(handle, inode, 1);
3229 set_buffer_new(bh_result);
3231 * if we allocated more blocks than requested
3232 * we need to make sure we unmap the extra blocks
3233 * allocated. The blocks actually needed will get
3234 * unmapped later when we find the buffer_head marked
3237 if (allocated > max_blocks) {
3238 unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
3239 newblock + max_blocks,
3240 allocated - max_blocks);
3241 allocated = max_blocks;
3245 * If we have done a fallocate at an offset that is already
3246 * delayed allocated, we would have block and quota
3247 * reservations done in the delayed write path.
3248 * But fallocate has already updated the quota and block
3249 * count for this offset. So cancel these reservations
3251 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
3252 ext4_da_update_reserve_space(inode, allocated, 0);
3255 set_buffer_mapped(bh_result);
3257 if (allocated > max_blocks)
3258 allocated = max_blocks;
3259 ext4_ext_show_leaf(inode, path);
3260 bh_result->b_bdev = inode->i_sb->s_bdev;
3261 bh_result->b_blocknr = newblock;
3264 ext4_ext_drop_refs(path);
3267 return err ? err : allocated;
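
/*
 * Illustrative sketch, not part of the original file: the decision order
 * implemented above, reduced to its flag tests. The helper and its return
 * strings are hypothetical; the flags are the real EXT4_GET_BLOCKS_* values
 * used throughout this file.
 */
static const char *ext4_example_uninit_action(int flags)
{
	if (flags & EXT4_GET_BLOCKS_PRE_IO)
		return "split the extent before the DIO is submitted";
	if (flags & EXT4_GET_BLOCKS_CONVERT)
		return "end_io: convert the filled extent to written";
	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
		return "repeated fallocate: extent is already unwritten";
	if (!(flags & EXT4_GET_BLOCKS_CREATE))
		return "lookup: leave buffer unwritten, reads see zeroes";
	return "buffered write: convert the extent to initialized";
}
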
3270 * Block allocation/map/preallocation routine for extents based files
3273 * Need to be called with
3274 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system blocks
3275 * (i.e., create is zero); otherwise, down_write(&EXT4_I(inode)->i_data_sem)
3277 * return > 0, number of blocks already mapped/allocated
3278 * if create == 0 and these are pre-allocated blocks
3279 * buffer head is unmapped
3280 * otherwise blocks are mapped
3282 * return = 0, if plain lookup failed (blocks have not been allocated)
3283 * buffer head is unmapped
3285 * return < 0, error case.
3287 int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3289 unsigned int max_blocks, struct buffer_head *bh_result,
3292 struct ext4_ext_path *path = NULL;
3293 struct ext4_extent_header *eh;
3294 struct ext4_extent newex, *ex, *last_ex;
3295 ext4_fsblk_t newblock;
3296 int err = 0, depth, ret, cache_type;
3297 unsigned int allocated = 0;
3298 struct ext4_allocation_request ar;
3299 ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
3301 __clear_bit(BH_New, &bh_result->b_state);
3302 ext_debug("blocks %u/%u requested for inode %lu\n",
3303 iblock, max_blocks, inode->i_ino);
3305 /* check in cache */
3306 cache_type = ext4_ext_in_cache(inode, iblock, &newex);
3308 if (cache_type == EXT4_EXT_CACHE_GAP) {
3309 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3311 * block isn't allocated yet and
3312 * user doesn't want to allocate it
3316 /* we should allocate requested block */
3317 } else if (cache_type == EXT4_EXT_CACHE_EXTENT) {
3318 /* block is already allocated */
3320 - le32_to_cpu(newex.ee_block)
3321 + ext_pblock(&newex);
3322 /* number of remaining blocks in the extent */
3323 allocated = ext4_ext_get_actual_len(&newex) -
3324 (iblock - le32_to_cpu(newex.ee_block));
3331 /* find extent for this block */
3332 path = ext4_ext_find_extent(inode, iblock, NULL);
3334 err = PTR_ERR(path);
3339 depth = ext_depth(inode);
3342 * a consistent leaf must not be empty;
3343 * this situation is possible, though, _during_ tree modification;
3344 * this is why the assert can't be put in ext4_ext_find_extent()
3346 if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
3347 EXT4_ERROR_INODE(inode, "bad extent address "
3348 "iblock: %d, depth: %d pblock %lld",
3349 iblock, depth, path[depth].p_block);
3353 eh = path[depth].p_hdr;
3355 ex = path[depth].p_ext;
3357 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
3358 ext4_fsblk_t ee_start = ext_pblock(ex);
3359 unsigned short ee_len;
3362 * Uninitialized extents are treated as holes, except that
3363 * we split out initialized portions during a write.
3365 ee_len = ext4_ext_get_actual_len(ex);
3366 /* if found extent covers block, simply return it */
3367 if (in_range(iblock, ee_block, ee_len)) {
3368 newblock = iblock - ee_block + ee_start;
3369 /* number of remaining blocks in the extent */
3370 allocated = ee_len - (iblock - ee_block);
3371 ext_debug("%u fit into %u:%d -> %llu\n", iblock,
3372 ee_block, ee_len, newblock);
3374 /* Do not put uninitialized extent in the cache */
3375 if (!ext4_ext_is_uninitialized(ex)) {
3376 ext4_ext_put_in_cache(inode, ee_block,
3378 EXT4_EXT_CACHE_EXTENT);
3381 ret = ext4_ext_handle_uninitialized_extents(handle,
3382 inode, iblock, max_blocks, path,
3383 flags, allocated, bh_result, newblock);
3389 * requested block isn't allocated yet;
3390 * we can't try to create a block if the create flag is zero
3392 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3394 * put the gap we just found into the cache to speed up
3395 * subsequent requests
3397 ext4_ext_put_gap_in_cache(inode, path, iblock);
3401 * Okay, we need to do block allocation.
3404 /* find neighbouring allocated blocks */
3406 err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
3410 err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright);
3415 * See if request is beyond maximum number of blocks we can have in
3416 * a single extent. For an initialized extent this limit is
3417 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
3418 * EXT_UNINIT_MAX_LEN.
3420 if (max_blocks > EXT_INIT_MAX_LEN &&
3421 !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
3422 max_blocks = EXT_INIT_MAX_LEN;
3423 else if (max_blocks > EXT_UNINIT_MAX_LEN &&
3424 (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
3425 max_blocks = EXT_UNINIT_MAX_LEN;
3427 /* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
3428 newex.ee_block = cpu_to_le32(iblock);
3429 newex.ee_len = cpu_to_le16(max_blocks);
3430 err = ext4_ext_check_overlap(inode, &newex, path);
3432 allocated = ext4_ext_get_actual_len(&newex);
3434 allocated = max_blocks;
3436 /* allocate new block */
3438 ar.goal = ext4_ext_find_goal(inode, path, iblock);
3439 ar.logical = iblock;
3441 if (S_ISREG(inode->i_mode))
3442 ar.flags = EXT4_MB_HINT_DATA;
3444 /* disable in-core preallocation for non-regular files */
3446 newblock = ext4_mb_new_blocks(handle, &ar, &err);
3449 ext_debug("allocate new block: goal %llu, found %llu/%u\n",
3450 ar.goal, newblock, allocated);
3452 /* try to insert new extent into found leaf and return */
3453 ext4_ext_store_pblock(&newex, newblock);
3454 newex.ee_len = cpu_to_le16(ar.len);
3455 /* Mark uninitialized */
3456 if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
3457 ext4_ext_mark_uninitialized(&newex);
3459 * An io_end structure is created for every IO write to an
3460 * uninitialized extent. To avoid unnecessary conversion,
3461 * here we flag the IO that really needs the conversion.
3462 * For the non-async direct IO case, flag the inode state
3463 * that we need to perform conversion when the IO is done.
3465 if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
3467 io->flag = EXT4_IO_UNWRITTEN;
3469 ext4_set_inode_state(inode,
3470 EXT4_STATE_DIO_UNWRITTEN);
3472 if (ext4_should_dioread_nolock(inode))
3473 set_buffer_uninit(bh_result);
3476 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL)) {
3477 if (unlikely(!eh->eh_entries)) {
3478 EXT4_ERROR_INODE(inode,
3479 "eh->eh_entries == 0 ee_block %d",
3484 last_ex = EXT_LAST_EXTENT(eh);
3485 if (iblock + ar.len > le32_to_cpu(last_ex->ee_block)
3486 + ext4_ext_get_actual_len(last_ex))
3487 EXT4_I(inode)->i_flags &= ~EXT4_EOFBLOCKS_FL;
3489 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
3491 /* free data blocks we just allocated */
3492 /* not a good idea to call discard here directly,
3493 * but otherwise we'd need to call it on every free() */
3494 ext4_discard_preallocations(inode);
3495 ext4_free_blocks(handle, inode, 0, ext_pblock(&newex),
3496 ext4_ext_get_actual_len(&newex), 0);
3500 /* the previous routine could have used the block we allocated */
3501 newblock = ext_pblock(&newex);
3502 allocated = ext4_ext_get_actual_len(&newex);
3503 if (allocated > max_blocks)
3504 allocated = max_blocks;
3505 set_buffer_new(bh_result);
3508 * Update reserved blocks/metadata blocks after successful
3509 * block allocation which had been deferred till now.
3511 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
3512 ext4_da_update_reserve_space(inode, allocated, 1);
3515 * Cache the extent and update transaction to commit on fdatasync only
3516 * when it is _not_ an uninitialized extent.
3518 if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
3519 ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
3520 EXT4_EXT_CACHE_EXTENT);
3521 ext4_update_inode_fsync_trans(handle, inode, 1);
3523 ext4_update_inode_fsync_trans(handle, inode, 0);
3525 if (allocated > max_blocks)
3526 allocated = max_blocks;
3527 ext4_ext_show_leaf(inode, path);
3528 set_buffer_mapped(bh_result);
3529 bh_result->b_bdev = inode->i_sb->s_bdev;
3530 bh_result->b_blocknr = newblock;
3533 ext4_ext_drop_refs(path);
3536 return err ? err : allocated;
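
/*
 * Illustrative sketch, not part of the original file: how a caller might
 * act on the return convention documented above ext4_ext_get_blocks()
 * (> 0: blocks mapped/allocated, 0: hole on a plain lookup, < 0: error).
 * ext4_example_lookup() is hypothetical; flags == 0 means no
 * EXT4_GET_BLOCKS_CREATE, i.e. a pure lookup.
 */
static int ext4_example_lookup(handle_t *handle, struct inode *inode,
			       ext4_lblk_t iblock, unsigned int max_blocks,
			       struct buffer_head *bh)
{
	int ret = ext4_ext_get_blocks(handle, inode, iblock, max_blocks,
				      bh, 0);
	if (ret < 0)
		return ret;	/* error, e.g. -EIO or tree corruption */
	if (ret == 0)
		return 0;	/* hole: bh is left unmapped */
	/* ret blocks are mapped starting at bh->b_blocknr */
	return ret;
}
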
3539 void ext4_ext_truncate(struct inode *inode)
3541 struct address_space *mapping = inode->i_mapping;
3542 struct super_block *sb = inode->i_sb;
3543 ext4_lblk_t last_block;
3548 * the first extent we free will probably be the last one in its block
3550 err = ext4_writepage_trans_blocks(inode);
3551 handle = ext4_journal_start(inode, err);
3555 if (inode->i_size & (sb->s_blocksize - 1))
3556 ext4_block_truncate_page(handle, mapping, inode->i_size);
3558 if (ext4_orphan_add(handle, inode))
3561 down_write(&EXT4_I(inode)->i_data_sem);
3562 ext4_ext_invalidate_cache(inode);
3564 ext4_discard_preallocations(inode);
3567 * TODO: optimization is possible here.
3568 * Probably we need not scan at all,
3569 * because page truncation is enough.
3572 /* we have to know where to truncate from in the crash case */
3573 EXT4_I(inode)->i_disksize = inode->i_size;
3574 ext4_mark_inode_dirty(handle, inode);
3576 last_block = (inode->i_size + sb->s_blocksize - 1)
3577 >> EXT4_BLOCK_SIZE_BITS(sb);
3578 err = ext4_ext_remove_space(inode, last_block);
3580 /* In a multi-transaction truncate, we only make the final
3581 * transaction synchronous.
3584 ext4_handle_sync(handle);
3587 up_write(&EXT4_I(inode)->i_data_sem);
3589 * If this was a simple ftruncate() and the file will remain alive,
3590 * then we need to clear up the orphan record which we created above.
3591 * However, if this was a real unlink then we were called by
3592 * ext4_delete_inode(), and we allow that function to clean up the
3593 * orphan info for us.
3596 ext4_orphan_del(handle, inode);
3598 inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
3599 ext4_mark_inode_dirty(handle, inode);
3600 ext4_journal_stop(handle);
3603 static void ext4_falloc_update_inode(struct inode *inode,
3604 int mode, loff_t new_size, int update_ctime)
3606 struct timespec now;
3609 now = current_fs_time(inode->i_sb);
3610 if (!timespec_equal(&inode->i_ctime, &now))
3611 inode->i_ctime = now;
3614 * Update only when preallocation was requested beyond
3617 if (!(mode & FALLOC_FL_KEEP_SIZE)) {
3618 if (new_size > i_size_read(inode))
3619 i_size_write(inode, new_size);
3620 if (new_size > EXT4_I(inode)->i_disksize)
3621 ext4_update_i_disksize(inode, new_size);
3624 * Mark that we allocate beyond EOF so the subsequent truncate
3625 * can proceed even if the new size is the same as i_size.
3627 if (new_size > i_size_read(inode))
3628 EXT4_I(inode)->i_flags |= EXT4_EOFBLOCKS_FL;
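
/*
 * Illustrative sketch, not part of the original file: the i_size policy
 * above in predicate form. Without FALLOC_FL_KEEP_SIZE the file size grows
 * to cover the preallocated range; with it, only the EOFBLOCKS flag is set.
 * ext4_example_should_extend_isize() is hypothetical.
 */
static inline int ext4_example_should_extend_isize(int mode, loff_t new_size,
						   loff_t cur_size)
{
	return !(mode & FALLOC_FL_KEEP_SIZE) && new_size > cur_size;
}
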
3634 * preallocate space for a file. This implements ext4's fallocate inode
3635 * operation, which gets called from the sys_fallocate system call.
3636 * For block-mapped files, posix_fallocate should fall back to the method
3637 * of writing zeroes to the required new blocks (the same behavior that is
3638 * expected of file systems that do not support the fallocate() system call).
3640 long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
3645 unsigned int max_blocks;
3649 struct buffer_head map_bh;
3650 unsigned int credits, blkbits = inode->i_blkbits;
3653 * currently supporting (pre)allocate mode for extent-based files only
3656 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
3659 /* preallocation to directories is currently not supported */
3660 if (S_ISDIR(inode->i_mode))
3663 block = offset >> blkbits;
3665 * We can't just convert len to max_blocks; with blocksize = 4096,
3666 * offset = 3072 and len = 2048 the range spans two blocks (see sketch below).
3668 max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
3671 * credits to insert 1 extent into extent tree
3673 credits = ext4_chunk_trans_blocks(inode, max_blocks);
3674 mutex_lock(&inode->i_mutex);
3676 while (ret >= 0 && ret < max_blocks) {
3677 block = block + ret;
3678 max_blocks = max_blocks - ret;
3679 handle = ext4_journal_start(inode, credits);
3680 if (IS_ERR(handle)) {
3681 ret = PTR_ERR(handle);
3685 ret = ext4_get_blocks(handle, inode, block,
3686 max_blocks, &map_bh,
3687 EXT4_GET_BLOCKS_CREATE_UNINIT_EXT);
3691 printk(KERN_ERR "%s: ext4_ext_get_blocks "
3692 "returned error inode#%lu, block=%u, "
3693 "max_blocks=%u", __func__,
3694 inode->i_ino, block, max_blocks);
3696 ext4_mark_inode_dirty(handle, inode);
3697 ret2 = ext4_journal_stop(handle);
3700 if ((block + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
3701 blkbits) >> blkbits))
3702 new_size = offset + len;
3704 new_size = (block + ret) << blkbits;
3706 ext4_falloc_update_inode(inode, mode, new_size,
3707 buffer_new(&map_bh));
3708 ext4_mark_inode_dirty(handle, inode);
3709 ret2 = ext4_journal_stop(handle);
3713 if (ret == -ENOSPC &&
3714 ext4_should_retry_alloc(inode->i_sb, &retries)) {
3718 mutex_unlock(&inode->i_mutex);
3719 return ret > 0 ? ret2 : ret;
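
/*
 * Illustrative sketch, not part of the original file: the max_blocks
 * arithmetic used in ext4_fallocate() above (and again in
 * ext4_convert_unwritten_extents() below). Rounding the end offset up to a
 * block boundary before shifting is what makes the example in the comment
 * (blocksize 4096, offset 3072, len 2048) come out as 2 blocks rather
 * than 0. ext4_example_range_to_blocks() is hypothetical.
 */
static unsigned int ext4_example_range_to_blocks(loff_t offset, loff_t len,
						 unsigned int blkbits)
{
	ext4_lblk_t first = offset >> blkbits;
	/* round offset + len up to the next block boundary, then count */
	ext4_lblk_t last = (offset + len + (1 << blkbits) - 1) >> blkbits;

	return last - first;	/* 2 for the example above */
}
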
3723 * This function converts a range of blocks to written extents.
3724 * The caller of this function will pass the start offset and the size;
3725 * all unwritten extents within this range will be converted to written.
3728 * This function is called from the direct IO end_io callback
3729 * function, to convert the fallocated extents after the IO is completed.
3730 * Returns 0 on success.
3732 int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
3737 unsigned int max_blocks;
3740 struct buffer_head map_bh;
3741 unsigned int credits, blkbits = inode->i_blkbits;
3743 block = offset >> blkbits;
3745 * We can't just convert len to max_blocks; with blocksize = 4096,
3746 * offset = 3072 and len = 2048 the range spans two blocks (see sketch above).
3748 max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
3751 * credits to insert 1 extent into extent tree
3753 credits = ext4_chunk_trans_blocks(inode, max_blocks);
3754 while (ret >= 0 && ret < max_blocks) {
3755 block = block + ret;
3756 max_blocks = max_blocks - ret;
3757 handle = ext4_journal_start(inode, credits);
3758 if (IS_ERR(handle)) {
3759 ret = PTR_ERR(handle);
3763 ret = ext4_get_blocks(handle, inode, block,
3764 max_blocks, &map_bh,
3765 EXT4_GET_BLOCKS_IO_CONVERT_EXT);
3768 printk(KERN_ERR "%s: ext4_ext_get_blocks "
3769 "returned error inode#%lu, block=%u, "
3770 "max_blocks=%u", __func__,
3771 inode->i_ino, block, max_blocks);
3773 ext4_mark_inode_dirty(handle, inode);
3774 ret2 = ext4_journal_stop(handle);
3775 if (ret <= 0 || ret2)
3778 return ret > 0 ? ret2 : ret;
3781 * Callback function called for each extent to gather FIEMAP information.
3783 static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
3784 struct ext4_ext_cache *newex, struct ext4_extent *ex,
3787 struct fiemap_extent_info *fieinfo = data;
3788 unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
3795 logical = (__u64)newex->ec_block << blksize_bits;
3797 if (newex->ec_type == EXT4_EXT_CACHE_GAP) {
3800 struct buffer_head *bh = NULL;
3802 offset = logical >> PAGE_SHIFT;
3803 page = find_get_page(inode->i_mapping, offset);
3804 if (!page || !page_has_buffers(page))
3805 return EXT_CONTINUE;
3807 bh = page_buffers(page);
3810 return EXT_CONTINUE;
3812 if (buffer_delay(bh)) {
3813 flags |= FIEMAP_EXTENT_DELALLOC;
3814 page_cache_release(page);
3816 page_cache_release(page);
3817 return EXT_CONTINUE;
3821 physical = (__u64)newex->ec_start << blksize_bits;
3822 length = (__u64)newex->ec_len << blksize_bits;
3824 if (ex && ext4_ext_is_uninitialized(ex))
3825 flags |= FIEMAP_EXTENT_UNWRITTEN;
3828 * If this extent reaches EXT_MAX_BLOCK, it must be last.
3830 * Or if ext4_ext_next_allocated_block is EXT_MAX_BLOCK,
3831 * this also indicates no more allocated blocks.
3833 * XXX this might miss a single-block extent at EXT_MAX_BLOCK
3835 if (ext4_ext_next_allocated_block(path) == EXT_MAX_BLOCK ||
3836 newex->ec_block + newex->ec_len - 1 == EXT_MAX_BLOCK) {
3837 loff_t size = i_size_read(inode);
3838 loff_t bs = EXT4_BLOCK_SIZE(inode->i_sb);
3840 flags |= FIEMAP_EXTENT_LAST;
3841 if ((flags & FIEMAP_EXTENT_DELALLOC) &&
3842 logical+length > size)
3843 length = (size - logical + bs - 1) & ~(bs-1);
3846 error = fiemap_fill_next_extent(fieinfo, logical, physical,
3853 return EXT_CONTINUE;
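
/*
 * Illustrative sketch, not part of the original file: the delalloc length
 * clamp above rounds (size - logical) up to a block boundary using the
 * add-and-mask idiom, which requires bs to be a power of two (block sizes
 * always are). ext4_example_round_up() is hypothetical.
 */
static inline __u64 ext4_example_round_up(__u64 n, __u64 bs)
{
	/* e.g. ext4_example_round_up(5, 4096) == 4096 */
	return (n + bs - 1) & ~(bs - 1);
}
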
3856 /* the fiemap flags we can handle are specified here */
3857 #define EXT4_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
3859 static int ext4_xattr_fiemap(struct inode *inode,
3860 struct fiemap_extent_info *fieinfo)
3864 __u32 flags = FIEMAP_EXTENT_LAST;
3865 int blockbits = inode->i_sb->s_blocksize_bits;
3869 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
3870 struct ext4_iloc iloc;
3871 int offset; /* offset of xattr in inode */
3873 error = ext4_get_inode_loc(inode, &iloc);
3876 physical = iloc.bh->b_blocknr << blockbits;
3877 offset = EXT4_GOOD_OLD_INODE_SIZE +
3878 EXT4_I(inode)->i_extra_isize;
3880 length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
3881 flags |= FIEMAP_EXTENT_DATA_INLINE;
3883 } else { /* external block */
3884 physical = EXT4_I(inode)->i_file_acl << blockbits;
3885 length = inode->i_sb->s_blocksize;
3889 error = fiemap_fill_next_extent(fieinfo, 0, physical,
3891 return (error < 0 ? error : 0);
3894 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3895 __u64 start, __u64 len)
3897 ext4_lblk_t start_blk;
3900 /* fallback to generic here if not in extents fmt */
3901 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
3902 return generic_block_fiemap(inode, fieinfo, start, len,
3905 if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
3908 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
3909 error = ext4_xattr_fiemap(inode, fieinfo);
3911 ext4_lblk_t len_blks;
3914 start_blk = start >> inode->i_sb->s_blocksize_bits;
3915 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
3916 if (last_blk >= EXT_MAX_BLOCK)
3917 last_blk = EXT_MAX_BLOCK-1;
3918 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
3921 * Walk the extent tree gathering extent information.
3922 * ext4_ext_fiemap_cb will push extents back to user.
3924 error = ext4_ext_walk_space(inode, start_blk, len_blks,
3925 ext4_ext_fiemap_cb, fieinfo);